diff --git a/.github/workflows/astria-build-and-publish-image.yml b/.github/workflows/astria-build-and-publish-image.yml
index 73654354e..3329474bb 100644
--- a/.github/workflows/astria-build-and-publish-image.yml
+++ b/.github/workflows/astria-build-and-publish-image.yml
@@ -72,4 +72,4 @@ jobs:
push: true
tags: ${{ steps.metadata.outputs.tags }}
labels: ${{ steps.metadata.outputs.labels }}
- project: w2d6w0spqz
\ No newline at end of file
+ project: w2d6w0spqz
diff --git a/cmd/geth/config.go b/cmd/geth/config.go
index fa122f8cc..c59520ae6 100644
--- a/cmd/geth/config.go
+++ b/cmd/geth/config.go
@@ -21,6 +21,8 @@ import (
"errors"
"fmt"
"github.com/ethereum/go-ethereum/eth/catalyst"
+ "github.com/ethereum/go-ethereum/grpc/optimistic"
+ "github.com/ethereum/go-ethereum/grpc/shared"
"os"
"reflect"
"runtime"
@@ -206,11 +208,17 @@ func makeFullNode(ctx *cli.Context) *node.Node {
// Configure gRPC if requested.
if ctx.IsSet(utils.GRPCEnabledFlag.Name) {
- serviceV1, err := execution.NewExecutionServiceServerV1(eth)
+ sharedService, err := shared.NewSharedServiceContainer(eth)
if err != nil {
- utils.Fatalf("failed to create execution service: %v", err)
+ utils.Fatalf("failed to create shared service container: %v", err)
}
- utils.RegisterGRPCExecutionService(stack, serviceV1, &cfg.Node)
+
+ serviceV1a2 := execution.NewExecutionServiceServerV1(sharedService)
+
+ optimisticServiceV1a1 := optimistic.NewOptimisticServiceV1Alpha(sharedService)
+
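+ // optimisticServiceV1a1 is registered below as both the optimistic execution and the auction gRPC service.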
+ utils.RegisterGRPCServices(stack, serviceV1a2, optimisticServiceV1a1, optimisticServiceV1a1, &cfg.Node)
}
// Add the Ethereum Stats daemon if requested.
diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index 162655190..a417a9753 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -123,6 +123,7 @@ var (
utils.MinerRecommitIntervalFlag,
utils.MinerPendingFeeRecipientFlag,
utils.MinerNewPayloadTimeoutFlag, // deprecated
+ utils.AuctioneerEnabledFlag,
utils.NATFlag,
utils.NoDiscoverFlag,
utils.DiscoveryV4Flag,
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index ebe359561..f2410a9ce 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -18,6 +18,7 @@
package utils
import (
+ optimisticGrpc "buf.build/gen/go/astria/execution-apis/grpc/go/astria/auction/v1alpha1/auctionv1alpha1grpc"
"context"
"crypto/ecdsa"
"encoding/hex"
@@ -769,6 +770,13 @@ var (
Category: flags.APICategory,
}
+ // Auctioneer settings
+ AuctioneerEnabledFlag = &cli.BoolFlag{
+ Name: "auctioneer",
+ Usage: "Enable the auctioneer server",
+ Category: flags.MinerCategory,
+ }
+
// Network Settings
MaxPeersFlag = &cli.IntFlag{
Name: "maxpeers",
@@ -1438,6 +1446,8 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
SetDataDir(ctx, cfg)
setSmartCard(ctx, cfg)
+ cfg.EnableAuctioneer = ctx.Bool(AuctioneerEnabledFlag.Name)
+
if ctx.IsSet(JWTSecretFlag.Name) {
cfg.JWTSecret = ctx.String(JWTSecretFlag.Name)
}
@@ -1987,10 +2001,10 @@ func RegisterGraphQLService(stack *node.Node, backend ethapi.Backend, filterSyst
}
}
-// RegisterGRPCExecutionService adds the gRPC API to the node.
+// RegisterGRPCServices adds the gRPC API to the node.
// It was done this way so that our grpc execution server can access the ethapi.Backend
-func RegisterGRPCExecutionService(stack *node.Node, execServ astriaGrpc.ExecutionServiceServer, cfg *node.Config) {
- if err := node.NewGRPCServerHandler(stack, execServ, cfg); err != nil {
+func RegisterGRPCServices(stack *node.Node, execServ astriaGrpc.ExecutionServiceServer, optimisticExecutionServ optimisticGrpc.OptimisticExecutionServiceServer, auctionServiceServer optimisticGrpc.AuctionServiceServer, cfg *node.Config) {
+ if err := node.NewGRPCServerHandler(stack, execServ, optimisticExecutionServ, auctionServiceServer, cfg); err != nil {
Fatalf("Failed to register the gRPC service: %v", err)
}
}
diff --git a/core/events.go b/core/events.go
index 4f4c01e3b..4afcadb1b 100644
--- a/core/events.go
+++ b/core/events.go
@@ -24,6 +24,12 @@ import (
// NewTxsEvent is posted when a batch of transactions enter the transaction pool.
type NewTxsEvent struct{ Txs []*types.Transaction }
+// NewMempoolCleared is posted when the mempool is cleared after a head reset for the trusted auctioneer.
+type NewMempoolCleared struct {
+ // the new head to which the mempool state was reset before the mempool was cleared
+ NewHead *types.Header
+}
+
// NewMinedBlockEvent is posted when a block has been imported.
type NewMinedBlockEvent struct{ Block *types.Block }
diff --git a/core/txpool/blobpool/blobpool.go b/core/txpool/blobpool/blobpool.go
index 1305b6a07..30b507f08 100644
--- a/core/txpool/blobpool/blobpool.go
+++ b/core/txpool/blobpool/blobpool.go
@@ -1602,6 +1602,10 @@ func (p *BlobPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool
}
}
+func (p *BlobPool) SubscribeMempoolClearance(ch chan<- core.NewMempoolCleared) event.Subscription {
+ return nil // the blob pool does not emit mempool clearance events
+}
+
// Nonce returns the next nonce of an account, with all transactions executable
// by the pool already applied on top.
func (p *BlobPool) Nonce(addr common.Address) uint64 {
diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go
index c691c1807..b1397292a 100644
--- a/core/txpool/legacypool/legacypool.go
+++ b/core/txpool/legacypool/legacypool.go
@@ -205,13 +205,14 @@ func (config *Config) sanitize() Config {
// current state) and future transactions. Transactions move between those
// two states over time as they are received and processed.
type LegacyPool struct {
- config Config
- chainconfig *params.ChainConfig
- chain BlockChain
- gasTip atomic.Pointer[uint256.Int]
- txFeed event.Feed
- signer types.Signer
- mu sync.RWMutex
+ config Config
+ chainconfig *params.ChainConfig
+ chain BlockChain
+ gasTip atomic.Pointer[uint256.Int]
+ txFeed event.Feed
+ mempoolClearFeed event.Feed
+ signer types.Signer
+ mu sync.RWMutex
astria *astriaOrdered
@@ -238,6 +239,8 @@ type LegacyPool struct {
initDoneCh chan struct{} // is closed once the pool is initialized (for tests)
changesSinceReorg int // A counter for how many drops we've performed in-between reorg.
+
+ auctioneerEnabled bool // when true, the pool is cleared on every head reset instead of rescheduling reorged transactions
}
type txpoolResetRequest struct {
@@ -246,26 +249,27 @@ type txpoolResetRequest struct {
// New creates a new transaction pool to gather, sort and filter inbound
// transactions from the network.
-func New(config Config, chain BlockChain) *LegacyPool {
+func New(config Config, chain BlockChain, auctioneerEnabled bool) *LegacyPool {
// Sanitize the input to ensure no vulnerable gas prices are set
config = (&config).sanitize()
// Create the transaction pool with its initial settings
pool := &LegacyPool{
- config: config,
- chain: chain,
- chainconfig: chain.Config(),
- signer: types.LatestSigner(chain.Config()),
- pending: make(map[common.Address]*list),
- queue: make(map[common.Address]*list),
- beats: make(map[common.Address]time.Time),
- all: newLookup(),
- reqResetCh: make(chan *txpoolResetRequest),
- reqPromoteCh: make(chan *accountSet),
- queueTxEventCh: make(chan *types.Transaction),
- reorgDoneCh: make(chan chan struct{}),
- reorgShutdownCh: make(chan struct{}),
- initDoneCh: make(chan struct{}),
+ config: config,
+ chain: chain,
+ chainconfig: chain.Config(),
+ signer: types.LatestSigner(chain.Config()),
+ pending: make(map[common.Address]*list),
+ queue: make(map[common.Address]*list),
+ beats: make(map[common.Address]time.Time),
+ all: newLookup(),
+ reqResetCh: make(chan *txpoolResetRequest),
+ reqPromoteCh: make(chan *accountSet),
+ queueTxEventCh: make(chan *types.Transaction),
+ reorgDoneCh: make(chan chan struct{}),
+ reorgShutdownCh: make(chan struct{}),
+ initDoneCh: make(chan struct{}),
+ auctioneerEnabled: auctioneerEnabled,
}
pool.locals = newAccountSet(pool.signer)
for _, addr := range config.Locals {
@@ -521,6 +525,12 @@ func (pool *LegacyPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs
return pool.txFeed.Subscribe(ch)
}
+// SubscribeMempoolClearance registers a subscription for the event that is fired
+// when the mempool is cleared after a head reset.
+func (pool *LegacyPool) SubscribeMempoolClearance(ch chan<- core.NewMempoolCleared) event.Subscription {
+ return pool.mempoolClearFeed.Subscribe(ch)
+}
+
// SetGasTip updates the minimum gas tip required by the transaction pool for a
// new transaction, and drops all transactions below this threshold.
func (pool *LegacyPool) SetGasTip(tip *big.Int) {
@@ -1366,8 +1376,16 @@ func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest,
}
pool.mu.Lock()
if reset != nil {
- // Reset from the old head to the new, rescheduling any reorged transactions
- pool.reset(reset.oldHead, reset.newHead)
+ // When running as an auctioneer node, only reset the txpool's head and state root.
+ // When not running as an auctioneer node, re-inject any reorged transactions,
+ // matching geth's default reorg handling.
+ if pool.auctioneerEnabled {
+ // only reset from the old head to the new head
+ pool.resetHeadOnly(reset.oldHead, reset.newHead)
+ } else {
+ // Reset from the old head to the new, rescheduling any reorged transactions
+ pool.reset(reset.oldHead, reset.newHead)
+ }
// Nonces were reset, discard any events that became stale
for addr := range events {
@@ -1376,7 +1394,6 @@ func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest,
delete(events, addr)
}
}
- // Reset needs promote for all addresses
promoteAddrs = make([]common.Address, 0, len(pool.queue))
for addr := range pool.queue {
promoteAddrs = append(promoteAddrs, addr)
@@ -1389,7 +1406,13 @@ func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest,
// remove any transaction that has been included in the block or was invalidated
// because of another transaction (e.g. higher gas price).
if reset != nil {
- pool.demoteUnexecutables()
+ if pool.auctioneerEnabled {
+ // When running the pool as an auctioneer, clear the entire mempool on every
+ // head reset instead of merely demoting unexecutable transactions.
+ pool.clearPendingAndQueued(reset.newHead)
+ } else {
+ pool.demoteUnexecutables()
+ }
if reset.newHead != nil {
if pool.chainconfig.IsLondon(new(big.Int).Add(reset.newHead.Number, big.NewInt(1))) {
pendingBaseFee := eip1559.CalcBaseFee(pool.chainconfig, reset.newHead)
@@ -1414,6 +1437,10 @@ func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest,
pool.changesSinceReorg = 0 // Reset change counter
pool.mu.Unlock()
+ if reset != nil {
+ pool.mempoolClearFeed.Send(core.NewMempoolCleared{NewHead: reset.newHead})
+ }
+
// Notify subsystems for newly added transactions
for _, tx := range promoted {
addr, _ := types.Sender(pool.signer, tx)
@@ -1511,6 +1538,7 @@ func (pool *LegacyPool) reset(oldHead, newHead *types.Header) {
}
}
}
+
// Initialize the internal state to the current head
if newHead == nil {
newHead = pool.chain.CurrentBlock() // Special case during testing
@@ -1524,12 +1552,30 @@ func (pool *LegacyPool) reset(oldHead, newHead *types.Header) {
pool.currentState = statedb
pool.pendingNonces = newNoncer(statedb)
+ // Note: when the auctioneer is enabled, runReorg calls resetHeadOnly instead, skipping this reinjection.
// Inject any transactions discarded due to reorgs
log.Debug("Reinjecting stale transactions", "count", len(reinject))
core.SenderCacher.Recover(pool.signer, reinject)
pool.addTxsLocked(reinject, false)
}
+// resetHeadOnly moves the pool's head and state to the given new head without
+// rescheduling any transactions that were reorged out of the canonical chain.
+func (pool *LegacyPool) resetHeadOnly(oldHead, newHead *types.Header) {
+ // Initialize the internal state to the current head
+ if newHead == nil {
+ newHead = pool.chain.CurrentBlock() // Special case during testing
+ }
+ statedb, err := pool.chain.StateAt(newHead.Root)
+ if err != nil {
+ log.Error("Failed to reset txpool state", "err", err)
+ return
+ }
+ pool.currentHead.Store(newHead)
+ pool.currentState = statedb
+ pool.pendingNonces = newNoncer(statedb)
+}
+
// promoteExecutables moves transactions that have become processable from the
// future queue to the set of pending transactions. During this process, all
// invalidated transactions (low nonce, low balance) are deleted.
@@ -1732,6 +1778,56 @@ func (pool *LegacyPool) truncateQueue() {
}
}
+// clearPendingAndQueued drops every pending and queued transaction from the pool and
+// releases the address reservations of emptied accounts. It assumes the pool lock is held.
+func (pool *LegacyPool) clearPendingAndQueued(newHead *types.Header) {
+ // Drop all pending transactions, tracking accounts whose lists become empty
+ addrsForWhichTxsRemoved := map[common.Address]bool{}
+
+ for addr, list := range pool.pending {
+ dropped, invalids := list.ClearList()
+
+ pendingGauge.Dec(int64(dropped.Len() + invalids.Len()))
+
+ for _, tx := range dropped {
+ pool.all.Remove(tx.Hash())
+ }
+ for _, tx := range invalids {
+ pool.all.Remove(tx.Hash())
+ }
+
+ if list.Empty() {
+ delete(pool.pending, addr)
+ delete(pool.beats, addr)
+
+ addrsForWhichTxsRemoved[addr] = true
+ }
+ }
+
+ for addr, list := range pool.queue {
+ dropped, invalids := list.ClearList()
+ queuedGauge.Dec(int64(dropped.Len() + invalids.Len()))
+
+ for _, tx := range dropped {
+ pool.all.Remove(tx.Hash())
+ }
+ for _, tx := range invalids {
+ pool.all.Remove(tx.Hash())
+ }
+
+ if list.Empty() {
+ delete(pool.queue, addr)
+
+ addrsForWhichTxsRemoved[addr] = true
+ }
+ }
+
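+ // Release the address reservations of accounts that no longer have any transactions in the pool.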
+ for addr := range addrsForWhichTxsRemoved {
+ pool.reserve(addr, false)
+ }
+}
+
// demoteUnexecutables removes invalid and processed transactions from the pools
// executable/pending queue and any subsequent transactions that become unexecutable
// are moved back into the future queue.
@@ -1742,6 +1837,7 @@ func (pool *LegacyPool) truncateQueue() {
func (pool *LegacyPool) demoteUnexecutables() {
// Iterate over all accounts and demote any non-executable transactions
gasLimit := pool.currentHead.Load().GasLimit
+
for addr, list := range pool.pending {
nonce := pool.currentState.GetNonce(addr)
diff --git a/core/txpool/legacypool/legacypool2_test.go b/core/txpool/legacypool/legacypool2_test.go
index fd961d1d9..d0e1d0e04 100644
--- a/core/txpool/legacypool/legacypool2_test.go
+++ b/core/txpool/legacypool/legacypool2_test.go
@@ -85,7 +85,7 @@ func TestTransactionFutureAttack(t *testing.T) {
config := testTxPoolConfig
config.GlobalQueue = 100
config.GlobalSlots = 100
- pool := New(config, blockchain)
+ pool := New(config, blockchain, true)
pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
fillPool(t, pool)
@@ -119,7 +119,7 @@ func TestTransactionFuture1559(t *testing.T) {
// Create the pool to test the pricing enforcement with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed))
- pool := New(testTxPoolConfig, blockchain)
+ pool := New(testTxPoolConfig, blockchain, true)
pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
@@ -152,7 +152,7 @@ func TestTransactionZAttack(t *testing.T) {
// Create the pool to test the pricing enforcement with
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed))
- pool := New(testTxPoolConfig, blockchain)
+ pool := New(testTxPoolConfig, blockchain, true)
pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
// Create a number of test accounts, fund them and make transactions
@@ -223,7 +223,7 @@ func BenchmarkFutureAttack(b *testing.B) {
config := testTxPoolConfig
config.GlobalQueue = 100
config.GlobalSlots = 100
- pool := New(config, blockchain)
+ pool := New(config, blockchain, true)
pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
fillPool(b, pool)
diff --git a/core/txpool/legacypool/legacypool_no_auctioneer_test.go b/core/txpool/legacypool/legacypool_no_auctioneer_test.go
new file mode 100644
index 000000000..1f70b5334
--- /dev/null
+++ b/core/txpool/legacypool/legacypool_no_auctioneer_test.go
@@ -0,0 +1,2537 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+
+package legacypool
+
+import (
+ "crypto/ecdsa"
+ "errors"
+ "math/big"
+ "math/rand"
+ "os"
+ "testing"
+ "time"
+
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/rawdb"
+ "github.com/ethereum/go-ethereum/core/state"
+ "github.com/ethereum/go-ethereum/core/tracing"
+ "github.com/ethereum/go-ethereum/core/txpool"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/event"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/holiman/uint256"
+)
+
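+// The tests in this file run the pool with the auctioneer disabled (auctioneerEnabled = false),
+// exercising geth's default reset and demotion behaviour rather than the clear-on-reset path.
+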
+func init() {
+ testTxPoolConfig = DefaultConfig
+ testTxPoolConfig.Journal = ""
+
+ cpy := *params.TestChainConfig
+ eip1559Config = &cpy
+ eip1559Config.BerlinBlock = common.Big0
+ eip1559Config.LondonBlock = common.Big0
+}
+
+// This test simulates a scenario where a new block is imported during a
+// state reset and tests whether the pending state is in sync with the
+// block head event that initiated the resetState().
+func TestStateChangeDuringResetNoAuctioneer(t *testing.T) {
+ t.Parallel()
+
+ var (
+ key, _ = crypto.GenerateKey()
+ address = crypto.PubkeyToAddress(key.PublicKey)
+ statedb, _ = state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ trigger = false
+ )
+
+ // set up a pool with 2 transactions in it
+ statedb.SetBalance(address, new(uint256.Int).SetUint64(params.Ether), tracing.BalanceChangeUnspecified)
+ blockchain := &testChain{newTestBlockChain(params.TestChainConfig, 1000000000, statedb, new(event.Feed)), address, &trigger}
+
+ tx0 := transaction(0, 100000, key)
+ tx1 := transaction(1, 100000, key)
+
+ pool := New(testTxPoolConfig, blockchain, false)
+ pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
+ defer pool.Close()
+
+ nonce := pool.Nonce(address)
+ if nonce != 0 {
+ t.Fatalf("Invalid nonce, want 0, got %d", nonce)
+ }
+
+ pool.addRemotesSync([]*types.Transaction{tx0, tx1})
+
+ nonce = pool.Nonce(address)
+ if nonce != 2 {
+ t.Fatalf("Invalid nonce, want 2, got %d", nonce)
+ }
+
+ // trigger state change in the background
+ trigger = true
+ <-pool.requestReset(nil, nil)
+
+ nonce = pool.Nonce(address)
+ if nonce != 2 {
+ t.Fatalf("Invalid nonce, want 2, got %d", nonce)
+ }
+}
+
+func TestInvalidTransactionsNoAuctioneer(t *testing.T) {
+ t.Parallel()
+
+ pool, key := setupPool(false)
+ defer pool.Close()
+
+ tx := transaction(0, 100, key)
+ from, _ := deriveSender(tx)
+
+ // Intrinsic gas too low
+ testAddBalance(pool, from, big.NewInt(1))
+ if err, want := pool.addRemote(tx), core.ErrIntrinsicGas; !errors.Is(err, want) {
+ t.Errorf("want %v have %v", want, err)
+ }
+
+ // Insufficient funds
+ tx = transaction(0, 100000, key)
+ if err, want := pool.addRemote(tx), core.ErrInsufficientFunds; !errors.Is(err, want) {
+ t.Errorf("want %v have %v", want, err)
+ }
+
+ testSetNonce(pool, from, 1)
+ testAddBalance(pool, from, big.NewInt(0xffffffffffffff))
+ tx = transaction(0, 100000, key)
+ if err, want := pool.addRemote(tx), core.ErrNonceTooLow; !errors.Is(err, want) {
+ t.Errorf("want %v have %v", want, err)
+ }
+
+ tx = transaction(1, 100000, key)
+ pool.gasTip.Store(uint256.NewInt(1000))
+ if err, want := pool.addRemote(tx), txpool.ErrUnderpriced; !errors.Is(err, want) {
+ t.Errorf("want %v have %v", want, err)
+ }
+ if err := pool.addLocal(tx); err != nil {
+ t.Error("expected", nil, "got", err)
+ }
+}
+
+func TestQueueNoAuctioneer(t *testing.T) {
+ t.Parallel()
+
+ pool, key := setupPool(false)
+ defer pool.Close()
+
+ tx := transaction(0, 100, key)
+ from, _ := deriveSender(tx)
+ testAddBalance(pool, from, big.NewInt(1000))
+ <-pool.requestReset(nil, nil)
+
+ pool.enqueueTx(tx.Hash(), tx, false, true)
+ <-pool.requestPromoteExecutables(newAccountSet(pool.signer, from))
+ if len(pool.pending) != 1 {
+ t.Error("expected valid txs to be 1 is", len(pool.pending))
+ }
+
+ tx = transaction(1, 100, key)
+ from, _ = deriveSender(tx)
+ testSetNonce(pool, from, 2)
+ pool.enqueueTx(tx.Hash(), tx, false, true)
+
+ <-pool.requestPromoteExecutables(newAccountSet(pool.signer, from))
+ if _, ok := pool.pending[from].txs.items[tx.Nonce()]; ok {
+ t.Error("expected transaction to be in tx pool")
+ }
+ if len(pool.queue) > 0 {
+ t.Error("expected transaction queue to be empty. is", len(pool.queue))
+ }
+}
+
+func TestQueue2NoAuctioneer(t *testing.T) {
+ t.Parallel()
+
+ pool, key := setupPool(false)
+ defer pool.Close()
+
+ tx1 := transaction(0, 100, key)
+ tx2 := transaction(10, 100, key)
+ tx3 := transaction(11, 100, key)
+ from, _ := deriveSender(tx1)
+ testAddBalance(pool, from, big.NewInt(1000))
+ pool.reset(nil, nil)
+
+ pool.enqueueTx(tx1.Hash(), tx1, false, true)
+ pool.enqueueTx(tx2.Hash(), tx2, false, true)
+ pool.enqueueTx(tx3.Hash(), tx3, false, true)
+
+ pool.promoteExecutables([]common.Address{from})
+ if len(pool.pending) != 1 {
+ t.Error("expected pending length to be 1, got", len(pool.pending))
+ }
+ if pool.queue[from].Len() != 2 {
+ t.Error("expected len(queue) == 2, got", pool.queue[from].Len())
+ }
+}
+
+func TestNegativeValueNoAuctioneer(t *testing.T) {
+ t.Parallel()
+
+ pool, key := setupPool(false)
+ defer pool.Close()
+
+ tx, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(-1), 100, big.NewInt(1), nil), types.HomesteadSigner{}, key)
+ from, _ := deriveSender(tx)
+ testAddBalance(pool, from, big.NewInt(1))
+ if err := pool.addRemote(tx); err != txpool.ErrNegativeValue {
+ t.Error("expected", txpool.ErrNegativeValue, "got", err)
+ }
+}
+
+func TestTipAboveFeeCapNoAuctioneer(t *testing.T) {
+ t.Parallel()
+
+ pool, key := setupPoolWithConfig(eip1559Config, false)
+ defer pool.Close()
+
+ tx := dynamicFeeTx(0, 100, big.NewInt(1), big.NewInt(2), key)
+
+ if err := pool.addRemote(tx); err != core.ErrTipAboveFeeCap {
+ t.Error("expected", core.ErrTipAboveFeeCap, "got", err)
+ }
+}
+
+func TestVeryHighValuesNoAuctioneer(t *testing.T) {
+ t.Parallel()
+
+ pool, key := setupPoolWithConfig(eip1559Config, false)
+ defer pool.Close()
+
+ veryBigNumber := big.NewInt(1)
+ veryBigNumber.Lsh(veryBigNumber, 300)
+
+ tx := dynamicFeeTx(0, 100, big.NewInt(1), veryBigNumber, key)
+ if err := pool.addRemote(tx); err != core.ErrTipVeryHigh {
+ t.Error("expected", core.ErrTipVeryHigh, "got", err)
+ }
+
+ tx2 := dynamicFeeTx(0, 100, veryBigNumber, big.NewInt(1), key)
+ if err := pool.addRemote(tx2); err != core.ErrFeeCapVeryHigh {
+ t.Error("expected", core.ErrFeeCapVeryHigh, "got", err)
+ }
+}
+
+func TestChainForkNoAuctioneer(t *testing.T) {
+ t.Parallel()
+
+ pool, key := setupPool(false)
+ defer pool.Close()
+
+ addr := crypto.PubkeyToAddress(key.PublicKey)
+ resetState := func() {
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ statedb.AddBalance(addr, uint256.NewInt(100000000000000), tracing.BalanceChangeUnspecified)
+
+ pool.chain = newTestBlockChain(pool.chainconfig, 1000000, statedb, new(event.Feed))
+ <-pool.requestReset(nil, nil)
+ }
+ resetState()
+
+ tx := transaction(0, 100000, key)
+ if _, err := pool.add(tx, false); err != nil {
+ t.Error("didn't expect error", err)
+ }
+ pool.removeTx(tx.Hash(), true, true)
+
+ // reset the pool's internal state
+ resetState()
+ if _, err := pool.add(tx, false); err != nil {
+ t.Error("didn't expect error", err)
+ }
+}
+
+func TestRemoveTxSanityNoAuctioneer(t *testing.T) {
+ t.Parallel()
+
+ pool, key := setupPool(false)
+ defer pool.Close()
+
+ addr := crypto.PubkeyToAddress(key.PublicKey)
+ resetState := func() {
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ statedb.AddBalance(addr, uint256.NewInt(100000000000000), tracing.BalanceChangeUnspecified)
+
+ pool.chain = newTestBlockChain(pool.chainconfig, 1000000, statedb, new(event.Feed))
+ <-pool.requestReset(nil, nil)
+ }
+ resetState()
+
+ tx1 := transaction(0, 100000, key)
+ tx2 := transaction(1, 100000, key)
+ tx3 := transaction(2, 100000, key)
+
+ if err := pool.addLocal(tx1); err != nil {
+ t.Error("didn't expect error", err)
+ }
+ if err := pool.addLocal(tx2); err != nil {
+ t.Error("didn't expect error", err)
+ }
+ if err := pool.addLocal(tx3); err != nil {
+ t.Error("didn't expect error", err)
+ }
+
+ pendingTxs := pool.pending[addr]
+ if pendingTxs.Len() != 3 {
+ t.Error("expected 3 pending transactions, got", pendingTxs.Len())
+ }
+
+ if err := validatePoolInternals(pool); err != nil {
+ t.Errorf("pool internals validation failed: %v", err)
+ }
+
+ n := pool.removeTx(tx1.Hash(), false, true)
+ if n != 3 {
+ t.Error("expected 3 transactions to be removed, got", n)
+ }
+ n = pool.removeTx(tx2.Hash(), false, true)
+ if n != 0 {
+ t.Error("expected 0 transactions to be removed, got", n)
+ }
+ n = pool.removeTx(tx3.Hash(), false, true)
+ if n != 0 {
+ t.Error("expected 0 transactions to be removed, got", n)
+ }
+
+ if len(pool.pending) != 0 {
+ t.Error("expected 0 pending transactions, got", pendingTxs.Len())
+ }
+
+ if err := validatePoolInternals(pool); err != nil {
+ t.Errorf("pool internals validation failed: %v", err)
+ }
+}
+
+func TestDoubleNonceNoAuctioneer(t *testing.T) {
+ t.Parallel()
+
+ pool, key := setupPool(false)
+ defer pool.Close()
+
+ addr := crypto.PubkeyToAddress(key.PublicKey)
+ resetState := func() {
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ statedb.AddBalance(addr, uint256.NewInt(100000000000000), tracing.BalanceChangeUnspecified)
+
+ pool.chain = newTestBlockChain(pool.chainconfig, 1000000, statedb, new(event.Feed))
+ <-pool.requestReset(nil, nil)
+ }
+ resetState()
+
+ signer := types.HomesteadSigner{}
+ tx1, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(100), 100000, big.NewInt(1), nil), signer, key)
+ tx2, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(100), 1000000, big.NewInt(2), nil), signer, key)
+ tx3, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(100), 1000000, big.NewInt(1), nil), signer, key)
+
+ // Add the first two transaction, ensure higher priced stays only
+ if replace, err := pool.add(tx1, false); err != nil || replace {
+ t.Errorf("first transaction insert failed (%v) or reported replacement (%v)", err, replace)
+ }
+ if replace, err := pool.add(tx2, false); err != nil || !replace {
+ t.Errorf("second transaction insert failed (%v) or not reported replacement (%v)", err, replace)
+ }
+ <-pool.requestPromoteExecutables(newAccountSet(signer, addr))
+ if pool.pending[addr].Len() != 1 {
+ t.Error("expected 1 pending transactions, got", pool.pending[addr].Len())
+ }
+ if tx := pool.pending[addr].txs.items[0]; tx.Hash() != tx2.Hash() {
+ t.Errorf("transaction mismatch: have %x, want %x", tx.Hash(), tx2.Hash())
+ }
+
+ // Add the third transaction and ensure it's not saved (smaller price)
+ pool.add(tx3, false)
+ <-pool.requestPromoteExecutables(newAccountSet(signer, addr))
+ if pool.pending[addr].Len() != 1 {
+ t.Error("expected 1 pending transactions, got", pool.pending[addr].Len())
+ }
+ if tx := pool.pending[addr].txs.items[0]; tx.Hash() != tx2.Hash() {
+ t.Errorf("transaction mismatch: have %x, want %x", tx.Hash(), tx2.Hash())
+ }
+ // Ensure the total transaction count is correct
+ if pool.all.Count() != 1 {
+ t.Error("expected 1 total transactions, got", pool.all.Count())
+ }
+}
+
+func TestMissingNonceNoAuctioneer(t *testing.T) {
+ t.Parallel()
+
+ pool, key := setupPool(false)
+ defer pool.Close()
+
+ addr := crypto.PubkeyToAddress(key.PublicKey)
+ testAddBalance(pool, addr, big.NewInt(100000000000000))
+ tx := transaction(1, 100000, key)
+ if _, err := pool.add(tx, false); err != nil {
+ t.Error("didn't expect error", err)
+ }
+ if len(pool.pending) != 0 {
+ t.Error("expected 0 pending transactions, got", len(pool.pending))
+ }
+ if pool.queue[addr].Len() != 1 {
+ t.Error("expected 1 queued transaction, got", pool.queue[addr].Len())
+ }
+ if pool.all.Count() != 1 {
+ t.Error("expected 1 total transactions, got", pool.all.Count())
+ }
+}
+
+func TestNonceRecoveryNoAuctioneer(t *testing.T) {
+ t.Parallel()
+
+ const n = 10
+ pool, key := setupPool(false)
+ defer pool.Close()
+
+ addr := crypto.PubkeyToAddress(key.PublicKey)
+ testSetNonce(pool, addr, n)
+ testAddBalance(pool, addr, big.NewInt(100000000000000))
+ <-pool.requestReset(nil, nil)
+
+ tx := transaction(n, 100000, key)
+ if err := pool.addRemote(tx); err != nil {
+ t.Error(err)
+ }
+ // simulate some weird re-order of transactions and missing nonce(s)
+ testSetNonce(pool, addr, n-1)
+ <-pool.requestReset(nil, nil)
+ if fn := pool.Nonce(addr); fn != n-1 {
+ t.Errorf("expected nonce to be %d, got %d", n-1, fn)
+ }
+}
+
+// Tests that if an account runs out of funds, any pending and queued transactions
+// are dropped.
+func TestDroppingNoAuctioneer(t *testing.T) {
+ t.Parallel()
+
+ // Create a test account and fund it
+ pool, key := setupPool(false)
+ defer pool.Close()
+
+ account := crypto.PubkeyToAddress(key.PublicKey)
+ testAddBalance(pool, account, big.NewInt(1000))
+
+ // Add some pending and some queued transactions
+ var (
+ tx0 = transaction(0, 100, key)
+ tx1 = transaction(1, 200, key)
+ tx2 = transaction(2, 300, key)
+ tx10 = transaction(10, 100, key)
+ tx11 = transaction(11, 200, key)
+ tx12 = transaction(12, 300, key)
+ )
+ pool.all.Add(tx0, false)
+ pool.priced.Put(tx0, false)
+ pool.promoteTx(account, tx0.Hash(), tx0)
+
+ pool.all.Add(tx1, false)
+ pool.priced.Put(tx1, false)
+ pool.promoteTx(account, tx1.Hash(), tx1)
+
+ pool.all.Add(tx2, false)
+ pool.priced.Put(tx2, false)
+ pool.promoteTx(account, tx2.Hash(), tx2)
+
+ pool.enqueueTx(tx10.Hash(), tx10, false, true)
+ pool.enqueueTx(tx11.Hash(), tx11, false, true)
+ pool.enqueueTx(tx12.Hash(), tx12, false, true)
+
+ // Check that pre and post validations leave the pool as is
+ if pool.pending[account].Len() != 3 {
+ t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 3)
+ }
+ if pool.queue[account].Len() != 3 {
+ t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 3)
+ }
+ if pool.all.Count() != 6 {
+ t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 6)
+ }
+ <-pool.requestReset(nil, nil)
+ if pool.pending[account].Len() != 3 {
+ t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 3)
+ }
+ if pool.queue[account].Len() != 3 {
+ t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 3)
+ }
+ if pool.all.Count() != 6 {
+ t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 6)
+ }
+ // Reduce the balance of the account, and check that invalidated transactions are dropped
+ testAddBalance(pool, account, big.NewInt(-650))
+ <-pool.requestReset(nil, nil)
+
+ if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok {
+ t.Errorf("funded pending transaction missing: %v", tx0)
+ }
+ if _, ok := pool.pending[account].txs.items[tx1.Nonce()]; !ok {
+ t.Errorf("funded pending transaction missing: %v", tx0)
+ }
+ if _, ok := pool.pending[account].txs.items[tx2.Nonce()]; ok {
+ t.Errorf("out-of-fund pending transaction present: %v", tx1)
+ }
+ if _, ok := pool.queue[account].txs.items[tx10.Nonce()]; !ok {
+ t.Errorf("funded queued transaction missing: %v", tx10)
+ }
+ if _, ok := pool.queue[account].txs.items[tx11.Nonce()]; !ok {
+ t.Errorf("funded queued transaction missing: %v", tx10)
+ }
+ if _, ok := pool.queue[account].txs.items[tx12.Nonce()]; ok {
+ t.Errorf("out-of-fund queued transaction present: %v", tx11)
+ }
+ if pool.all.Count() != 4 {
+ t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 4)
+ }
+ // Reduce the block gas limit, check that invalidated transactions are dropped
+ pool.chain.(*testBlockChain).gasLimit.Store(100)
+ <-pool.requestReset(nil, nil)
+
+ if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok {
+ t.Errorf("funded pending transaction missing: %v", tx0)
+ }
+ if _, ok := pool.pending[account].txs.items[tx1.Nonce()]; ok {
+ t.Errorf("over-gased pending transaction present: %v", tx1)
+ }
+ if _, ok := pool.queue[account].txs.items[tx10.Nonce()]; !ok {
+ t.Errorf("funded queued transaction missing: %v", tx10)
+ }
+ if _, ok := pool.queue[account].txs.items[tx11.Nonce()]; ok {
+ t.Errorf("over-gased queued transaction present: %v", tx11)
+ }
+ if pool.all.Count() != 2 {
+ t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 2)
+ }
+}
+
+// Tests that if a transaction is dropped from the current pending pool (e.g. out
+// of fund), all consecutive (still valid, but not executable) transactions are
+// postponed back into the future queue to prevent broadcasting them.
+func TestPostponingNoAuctioneer(t *testing.T) {
+ t.Parallel()
+
+ // Create the pool to test the postponing with
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
+
+ pool := New(testTxPoolConfig, blockchain, false)
+ pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
+ defer pool.Close()
+
+ // Create two test accounts to produce different gap profiles with
+ keys := make([]*ecdsa.PrivateKey, 2)
+ accs := make([]common.Address, len(keys))
+
+ for i := 0; i < len(keys); i++ {
+ keys[i], _ = crypto.GenerateKey()
+ accs[i] = crypto.PubkeyToAddress(keys[i].PublicKey)
+
+ testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(50100))
+ }
+ // Add a batch of consecutive pending transactions for validation
+ txs := []*types.Transaction{}
+ for i, key := range keys {
+ for j := 0; j < 100; j++ {
+ var tx *types.Transaction
+ if (i+j)%2 == 0 {
+ tx = transaction(uint64(j), 25000, key)
+ } else {
+ tx = transaction(uint64(j), 50000, key)
+ }
+ txs = append(txs, tx)
+ }
+ }
+ for i, err := range pool.addRemotesSync(txs) {
+ if err != nil {
+ t.Fatalf("tx %d: failed to add transactions: %v", i, err)
+ }
+ }
+ // Check that pre and post validations leave the pool as is
+ if pending := pool.pending[accs[0]].Len() + pool.pending[accs[1]].Len(); pending != len(txs) {
+ t.Errorf("pending transaction mismatch: have %d, want %d", pending, len(txs))
+ }
+ if len(pool.queue) != 0 {
+ t.Errorf("queued accounts mismatch: have %d, want %d", len(pool.queue), 0)
+ }
+ if pool.all.Count() != len(txs) {
+ t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs))
+ }
+ <-pool.requestReset(nil, nil)
+ if pending := pool.pending[accs[0]].Len() + pool.pending[accs[1]].Len(); pending != len(txs) {
+ t.Errorf("pending transaction mismatch: have %d, want %d", pending, len(txs))
+ }
+ if len(pool.queue) != 0 {
+ t.Errorf("queued accounts mismatch: have %d, want %d", len(pool.queue), 0)
+ }
+ if pool.all.Count() != len(txs) {
+ t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs))
+ }
+ // Reduce the balance of the account, and check that transactions are reorganised
+ for _, addr := range accs {
+ testAddBalance(pool, addr, big.NewInt(-1))
+ }
+ <-pool.requestReset(nil, nil)
+
+ // The first account's first transaction remains valid, check that subsequent
+ // ones are either filtered out, or queued up for later.
+ if _, ok := pool.pending[accs[0]].txs.items[txs[0].Nonce()]; !ok {
+ t.Errorf("tx %d: valid and funded transaction missing from pending pool: %v", 0, txs[0])
+ }
+ if _, ok := pool.queue[accs[0]].txs.items[txs[0].Nonce()]; ok {
+ t.Errorf("tx %d: valid and funded transaction present in future queue: %v", 0, txs[0])
+ }
+ for i, tx := range txs[1:100] {
+ if i%2 == 1 {
+ if _, ok := pool.pending[accs[0]].txs.items[tx.Nonce()]; ok {
+ t.Errorf("tx %d: valid but future transaction present in pending pool: %v", i+1, tx)
+ }
+ if _, ok := pool.queue[accs[0]].txs.items[tx.Nonce()]; !ok {
+ t.Errorf("tx %d: valid but future transaction missing from future queue: %v", i+1, tx)
+ }
+ } else {
+ if _, ok := pool.pending[accs[0]].txs.items[tx.Nonce()]; ok {
+ t.Errorf("tx %d: out-of-fund transaction present in pending pool: %v", i+1, tx)
+ }
+ if _, ok := pool.queue[accs[0]].txs.items[tx.Nonce()]; ok {
+ t.Errorf("tx %d: out-of-fund transaction present in future queue: %v", i+1, tx)
+ }
+ }
+ }
+ // The second account's first transaction got invalid, check that all transactions
+ // are either filtered out, or queued up for later.
+ if pool.pending[accs[1]] != nil {
+ t.Errorf("invalidated account still has pending transactions")
+ }
+ for i, tx := range txs[100:] {
+ if i%2 == 1 {
+ if _, ok := pool.queue[accs[1]].txs.items[tx.Nonce()]; !ok {
+ t.Errorf("tx %d: valid but future transaction missing from future queue: %v", 100+i, tx)
+ }
+ } else {
+ if _, ok := pool.queue[accs[1]].txs.items[tx.Nonce()]; ok {
+ t.Errorf("tx %d: out-of-fund transaction present in future queue: %v", 100+i, tx)
+ }
+ }
+ }
+ if pool.all.Count() != len(txs)/2 {
+ t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs)/2)
+ }
+}
+
+// Tests that if the transaction pool has both executable and non-executable
+// transactions from an origin account, filling the nonce gap moves all queued
+// ones into the pending pool.
+func TestGapFillingNoAuctioneer(t *testing.T) {
+ t.Parallel()
+
+ // Create a test account and fund it
+ pool, key := setupPool(false)
+ defer pool.Close()
+
+ account := crypto.PubkeyToAddress(key.PublicKey)
+ testAddBalance(pool, account, big.NewInt(1000000))
+
+ // Keep track of transaction events to ensure all executables get announced
+ events := make(chan core.NewTxsEvent, testTxPoolConfig.AccountQueue+5)
+ sub := pool.txFeed.Subscribe(events)
+ defer sub.Unsubscribe()
+
+ // Create a pending and a queued transaction with a nonce-gap in between
+ pool.addRemotesSync([]*types.Transaction{
+ transaction(0, 100000, key),
+ transaction(2, 100000, key),
+ })
+ pending, queued := pool.Stats()
+ if pending != 1 {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 1)
+ }
+ if queued != 1 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1)
+ }
+ if err := validateEvents(events, 1); err != nil {
+ t.Fatalf("original event firing failed: %v", err)
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+ // Fill the nonce gap and ensure all transactions become pending
+ if err := pool.addRemoteSync(transaction(1, 100000, key)); err != nil {
+ t.Fatalf("failed to add gapped transaction: %v", err)
+ }
+ pending, queued = pool.Stats()
+ if pending != 3 {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3)
+ }
+ if queued != 0 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
+ }
+ if err := validateEvents(events, 2); err != nil {
+ t.Fatalf("gap-filling event firing failed: %v", err)
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+}
+
+// Tests that if the transaction count belonging to a single account goes above
+// some threshold, the higher transactions are dropped to prevent DOS attacks.
+func TestQueueAccountLimitingNoAuctioneer(t *testing.T) {
+ t.Parallel()
+
+ // Create a test account and fund it
+ pool, key := setupPool(false)
+ defer pool.Close()
+
+ account := crypto.PubkeyToAddress(key.PublicKey)
+ testAddBalance(pool, account, big.NewInt(1000000))
+
+ // Keep queuing up transactions and make sure all above a limit are dropped
+ for i := uint64(1); i <= testTxPoolConfig.AccountQueue+5; i++ {
+ if err := pool.addRemoteSync(transaction(i, 100000, key)); err != nil {
+ t.Fatalf("tx %d: failed to add transaction: %v", i, err)
+ }
+ if len(pool.pending) != 0 {
+ t.Errorf("tx %d: pending pool size mismatch: have %d, want %d", i, len(pool.pending), 0)
+ }
+ if i <= testTxPoolConfig.AccountQueue {
+ if pool.queue[account].Len() != int(i) {
+ t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, pool.queue[account].Len(), i)
+ }
+ } else {
+ if pool.queue[account].Len() != int(testTxPoolConfig.AccountQueue) {
+ t.Errorf("tx %d: queue limit mismatch: have %d, want %d", i, pool.queue[account].Len(), testTxPoolConfig.AccountQueue)
+ }
+ }
+ }
+ if pool.all.Count() != int(testTxPoolConfig.AccountQueue) {
+ t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), testTxPoolConfig.AccountQueue)
+ }
+}
+
+// Tests that if the transaction count belonging to multiple accounts go above
+// some threshold, the higher transactions are dropped to prevent DOS attacks.
+//
+// This logic should not hold for local transactions, unless the local tracking
+// mechanism is disabled.
+func TestQueueGlobalLimitingNoAuctioneer(t *testing.T) {
+ testQueueGlobalLimiting(t, false)
+}
+func TestQueueGlobalLimitingNoLocalsNoAuctioneer(t *testing.T) {
+ testQueueGlobalLimiting(t, true)
+}
+
+func testQueueGlobalLimiting(t *testing.T, nolocals bool) {
+ t.Parallel()
+
+ // Create the pool to test the limit enforcement with
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
+
+ config := testTxPoolConfig
+ config.NoLocals = nolocals
+ config.GlobalQueue = config.AccountQueue*3 - 1 // reduce the queue limits to shorten test time (-1 to make it non divisible)
+
+ pool := New(config, blockchain, false)
+ pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
+ defer pool.Close()
+
+ // Create a number of test accounts and fund them (last one will be the local)
+ keys := make([]*ecdsa.PrivateKey, 5)
+ for i := 0; i < len(keys); i++ {
+ keys[i], _ = crypto.GenerateKey()
+ testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000))
+ }
+ local := keys[len(keys)-1]
+
+ // Generate and queue a batch of transactions
+ nonces := make(map[common.Address]uint64)
+
+ txs := make(types.Transactions, 0, 3*config.GlobalQueue)
+ for len(txs) < cap(txs) {
+ key := keys[rand.Intn(len(keys)-1)] // skip adding transactions with the local account
+ addr := crypto.PubkeyToAddress(key.PublicKey)
+
+ txs = append(txs, transaction(nonces[addr]+1, 100000, key))
+ nonces[addr]++
+ }
+ // Import the batch and verify that limits have been enforced
+ pool.addRemotesSync(txs)
+
+ queued := 0
+ for addr, list := range pool.queue {
+ if list.Len() > int(config.AccountQueue) {
+ t.Errorf("addr %x: queued accounts overflown allowance: %d > %d", addr, list.Len(), config.AccountQueue)
+ }
+ queued += list.Len()
+ }
+ if queued > int(config.GlobalQueue) {
+ t.Fatalf("total transactions overflow allowance: %d > %d", queued, config.GlobalQueue)
+ }
+ // Generate a batch of transactions from the local account and import them
+ txs = txs[:0]
+ for i := uint64(0); i < 3*config.GlobalQueue; i++ {
+ txs = append(txs, transaction(i+1, 100000, local))
+ }
+ pool.addLocals(txs)
+
+ // If locals are disabled, the previous eviction algorithm should apply here too
+ if nolocals {
+ queued := 0
+ for addr, list := range pool.queue {
+ if list.Len() > int(config.AccountQueue) {
+ t.Errorf("addr %x: queued accounts overflown allowance: %d > %d", addr, list.Len(), config.AccountQueue)
+ }
+ queued += list.Len()
+ }
+ if queued > int(config.GlobalQueue) {
+ t.Fatalf("total transactions overflow allowance: %d > %d", queued, config.GlobalQueue)
+ }
+ } else {
+ // Local exemptions are enabled, make sure the local account owned the queue
+ if len(pool.queue) != 1 {
+ t.Errorf("multiple accounts in queue: have %v, want %v", len(pool.queue), 1)
+ }
+ // Also ensure no local transactions are ever dropped, even if above global limits
+ if queued := pool.queue[crypto.PubkeyToAddress(local.PublicKey)].Len(); uint64(queued) != 3*config.GlobalQueue {
+ t.Fatalf("local account queued transaction count mismatch: have %v, want %v", queued, 3*config.GlobalQueue)
+ }
+ }
+}
+
+// Tests that if an account remains idle for a prolonged amount of time, any
+// non-executable transactions queued up are dropped to prevent wasting resources
+// on shuffling them around.
+//
+// This logic should not hold for local transactions, unless the local tracking
+// mechanism is disabled.
+func TestQueueTimeLimitingNoAuctioneer(t *testing.T) {
+ testQueueTimeLimitingNoAuctioneer(t, false)
+}
+func TestQueueTimeLimitingNoLocalsNoAuctioneer(t *testing.T) {
+ testQueueTimeLimitingNoAuctioneer(t, true)
+}
+
+func testQueueTimeLimitingNoAuctioneer(t *testing.T, nolocals bool) {
+ // Reduce the eviction interval to a testable amount
+ defer func(old time.Duration) { evictionInterval = old }(evictionInterval)
+ evictionInterval = time.Millisecond * 100
+
+ // Create the pool to test the non-expiration enforcement
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
+
+ config := testTxPoolConfig
+ config.Lifetime = time.Second
+ config.NoLocals = nolocals
+
+ pool := New(config, blockchain, false)
+ pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
+ defer pool.Close()
+
+ // Create two test accounts to ensure remotes expire but locals do not
+ local, _ := crypto.GenerateKey()
+ remote, _ := crypto.GenerateKey()
+
+ testAddBalance(pool, crypto.PubkeyToAddress(local.PublicKey), big.NewInt(1000000000))
+ testAddBalance(pool, crypto.PubkeyToAddress(remote.PublicKey), big.NewInt(1000000000))
+
+ // Add the two transactions and ensure they both are queued up
+ if err := pool.addLocal(pricedTransaction(1, 100000, big.NewInt(1), local)); err != nil {
+ t.Fatalf("failed to add local transaction: %v", err)
+ }
+ if err := pool.addRemote(pricedTransaction(1, 100000, big.NewInt(1), remote)); err != nil {
+ t.Fatalf("failed to add remote transaction: %v", err)
+ }
+ pending, queued := pool.Stats()
+ if pending != 0 {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0)
+ }
+ if queued != 2 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2)
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+
+ // Allow the eviction interval to run
+ time.Sleep(2 * evictionInterval)
+
+ // Transactions should not be evicted from the queue yet since lifetime duration has not passed
+ pending, queued = pool.Stats()
+ if pending != 0 {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0)
+ }
+ if queued != 2 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2)
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+
+ // Wait a bit for eviction to run and clean up any leftovers, and ensure only the local remains
+ time.Sleep(2 * config.Lifetime)
+
+ pending, queued = pool.Stats()
+ if pending != 0 {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0)
+ }
+ if nolocals {
+ if queued != 0 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
+ }
+ } else {
+ if queued != 1 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1)
+ }
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+
+ // remove current transactions and increase nonce to prepare for a reset and cleanup
+ statedb.SetNonce(crypto.PubkeyToAddress(remote.PublicKey), 2)
+ statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 2)
+ <-pool.requestReset(nil, nil)
+
+ // make sure queue, pending are cleared
+ pending, queued = pool.Stats()
+ if pending != 0 {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0)
+ }
+ if queued != 0 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+
+ // Queue gapped transactions
+ if err := pool.addLocal(pricedTransaction(4, 100000, big.NewInt(1), local)); err != nil {
+ t.Fatalf("failed to add remote transaction: %v", err)
+ }
+ if err := pool.addRemoteSync(pricedTransaction(4, 100000, big.NewInt(1), remote)); err != nil {
+ t.Fatalf("failed to add remote transaction: %v", err)
+ }
+ time.Sleep(5 * evictionInterval) // A half lifetime pass
+
+ // Queue executable transactions, the life cycle should be restarted.
+ if err := pool.addLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil {
+ t.Fatalf("failed to add remote transaction: %v", err)
+ }
+ if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(1), remote)); err != nil {
+ t.Fatalf("failed to add remote transaction: %v", err)
+ }
+ time.Sleep(6 * evictionInterval)
+
+ // All gapped transactions shouldn't be kicked out
+ pending, queued = pool.Stats()
+ if pending != 2 {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
+ }
+ if queued != 2 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2)
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+
+ // The whole life time pass after last promotion, kick out stale transactions
+ time.Sleep(2 * config.Lifetime)
+ pending, queued = pool.Stats()
+ if pending != 2 {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
+ }
+ if nolocals {
+ if queued != 0 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
+ }
+ } else {
+ if queued != 1 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1)
+ }
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+}
+
+// Tests that even if the transaction count belonging to a single account goes
+// above some threshold, as long as the transactions are executable, they are
+// accepted.
+func TestPendingLimitingNoAuctioneer(t *testing.T) {
+ t.Parallel()
+
+ // Create a test account and fund it
+ pool, key := setupPool(false)
+ defer pool.Close()
+
+ account := crypto.PubkeyToAddress(key.PublicKey)
+ testAddBalance(pool, account, big.NewInt(1000000000000))
+
+ // Keep track of transaction events to ensure all executables get announced
+ events := make(chan core.NewTxsEvent, testTxPoolConfig.AccountQueue+5)
+ sub := pool.txFeed.Subscribe(events)
+ defer sub.Unsubscribe()
+
+ // Keep queuing up transactions and make sure all above a limit are dropped
+ for i := uint64(0); i < testTxPoolConfig.AccountQueue+5; i++ {
+ if err := pool.addRemoteSync(transaction(i, 100000, key)); err != nil {
+ t.Fatalf("tx %d: failed to add transaction: %v", i, err)
+ }
+ if pool.pending[account].Len() != int(i)+1 {
+ t.Errorf("tx %d: pending pool size mismatch: have %d, want %d", i, pool.pending[account].Len(), i+1)
+ }
+ if len(pool.queue) != 0 {
+ t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, pool.queue[account].Len(), 0)
+ }
+ }
+ if pool.all.Count() != int(testTxPoolConfig.AccountQueue+5) {
+ t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), testTxPoolConfig.AccountQueue+5)
+ }
+ if err := validateEvents(events, int(testTxPoolConfig.AccountQueue+5)); err != nil {
+ t.Fatalf("event firing failed: %v", err)
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+}
+
+// Tests that if the transaction count belonging to multiple accounts go above
+// some hard threshold, the higher transactions are dropped to prevent DOS
+// attacks.
+func TestPendingGlobalLimitingNoAuctioneer(t *testing.T) {
+ t.Parallel()
+
+ // Create the pool to test the limit enforcement with
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
+
+ config := testTxPoolConfig
+ config.GlobalSlots = config.AccountSlots * 10
+
+ pool := New(config, blockchain, false)
+ pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
+ defer pool.Close()
+
+ // Create a number of test accounts and fund them
+ keys := make([]*ecdsa.PrivateKey, 5)
+ for i := 0; i < len(keys); i++ {
+ keys[i], _ = crypto.GenerateKey()
+ testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000))
+ }
+ // Generate and queue a batch of transactions
+ nonces := make(map[common.Address]uint64)
+
+ txs := types.Transactions{}
+ for _, key := range keys {
+ addr := crypto.PubkeyToAddress(key.PublicKey)
+ for j := 0; j < int(config.GlobalSlots)/len(keys)*2; j++ {
+ txs = append(txs, transaction(nonces[addr], 100000, key))
+ nonces[addr]++
+ }
+ }
+ // Import the batch and verify that limits have been enforced
+ pool.addRemotesSync(txs)
+
+ pending := 0
+ for _, list := range pool.pending {
+ pending += list.Len()
+ }
+ if pending > int(config.GlobalSlots) {
+ t.Fatalf("total pending transactions overflow allowance: %d > %d", pending, config.GlobalSlots)
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+}
+
+// Test the limit on transaction size is enforced correctly.
+// This test verifies every transaction having allowed size
+// is added to the pool, and longer transactions are rejected.
+func TestAllowedTxSizeNoAuctioneer(t *testing.T) {
+ t.Parallel()
+
+ // Create a test account and fund it
+ pool, key := setupPool(false)
+ defer pool.Close()
+
+ account := crypto.PubkeyToAddress(key.PublicKey)
+ testAddBalance(pool, account, big.NewInt(1000000000))
+
+ // Compute maximal data size for transactions (lower bound).
+ //
+ // It is assumed the fields in the transaction (except of the data) are:
+ // - nonce <= 32 bytes
+ // - gasTip <= 32 bytes
+ // - gasLimit <= 32 bytes
+ // - recipient == 20 bytes
+ // - value <= 32 bytes
+ // - signature == 65 bytes
+ // All those fields are summed up to at most 213 bytes.
+ baseSize := uint64(213)
+ dataSize := txMaxSize - baseSize
+ // Try adding a transaction with maximal allowed size
+ tx := pricedDataTransaction(0, pool.currentHead.Load().GasLimit, big.NewInt(1), key, dataSize)
+ if err := pool.addRemoteSync(tx); err != nil {
+ t.Fatalf("failed to add transaction of size %d, close to maximal: %v", int(tx.Size()), err)
+ }
+ // Try adding a transaction with random allowed size
+ if err := pool.addRemoteSync(pricedDataTransaction(1, pool.currentHead.Load().GasLimit, big.NewInt(1), key, uint64(rand.Intn(int(dataSize))))); err != nil {
+ t.Fatalf("failed to add transaction of random allowed size: %v", err)
+ }
+ // Try adding a transaction of minimal not allowed size
+ if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentHead.Load().GasLimit, big.NewInt(1), key, txMaxSize)); err == nil {
+ t.Fatalf("expected rejection on slightly oversize transaction")
+ }
+ // Try adding a transaction of random not allowed size
+ if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentHead.Load().GasLimit, big.NewInt(1), key, dataSize+1+uint64(rand.Intn(10*txMaxSize)))); err == nil {
+ t.Fatalf("expected rejection on oversize transaction")
+ }
+ // Run some sanity checks on the pool internals
+ pending, queued := pool.Stats()
+ if pending != 2 {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
+ }
+ if queued != 0 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+}
+
+// Tests that if transactions start being capped, transactions are also removed from 'all'
+func TestCapClearsFromAllNoAuctioneer(t *testing.T) {
+ t.Parallel()
+
+ // Create the pool to test the limit enforcement with
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
+
+ config := testTxPoolConfig
+ config.AccountSlots = 2
+ config.AccountQueue = 2
+ config.GlobalSlots = 8
+
+ pool := New(config, blockchain, false)
+ pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
+ defer pool.Close()
+
+ // Create a number of test accounts and fund them
+ key, _ := crypto.GenerateKey()
+ addr := crypto.PubkeyToAddress(key.PublicKey)
+ testAddBalance(pool, addr, big.NewInt(1000000))
+
+ txs := types.Transactions{}
+ for j := 0; j < int(config.GlobalSlots)*2; j++ {
+ txs = append(txs, transaction(uint64(j), 100000, key))
+ }
+ // Import the batch and verify that limits have been enforced
+ pool.addRemotes(txs)
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+}
+
+// Tests that if the transaction count belonging to multiple accounts goes above
+// some hard threshold, transactions under the minimum guaranteed slot count are
+// still kept.
+func TestPendingMinimumAllowanceNoAuctioneer(t *testing.T) {
+ t.Parallel()
+
+ // Create the pool to test the limit enforcement with
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
+
+ config := testTxPoolConfig
+ config.GlobalSlots = 1
+
+ pool := New(config, blockchain, false)
+ pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
+ defer pool.Close()
+
+ // Create a number of test accounts and fund them
+ keys := make([]*ecdsa.PrivateKey, 5)
+ for i := 0; i < len(keys); i++ {
+ keys[i], _ = crypto.GenerateKey()
+ testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000))
+ }
+ // Generate and queue a batch of transactions
+ nonces := make(map[common.Address]uint64)
+
+ txs := types.Transactions{}
+ for _, key := range keys {
+ addr := crypto.PubkeyToAddress(key.PublicKey)
+ for j := 0; j < int(config.AccountSlots)*2; j++ {
+ txs = append(txs, transaction(nonces[addr], 100000, key))
+ nonces[addr]++
+ }
+ }
+ // Import the batch and verify that limits have been enforced
+ pool.addRemotesSync(txs)
+
+ for addr, list := range pool.pending {
+ if list.Len() != int(config.AccountSlots) {
+ t.Errorf("addr %x: total pending transactions mismatch: have %d, want %d", addr, list.Len(), config.AccountSlots)
+ }
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+}
+
+// Tests that setting the transaction pool gas price to a higher value correctly
+// discards everything cheaper than that and moves any gapped transactions back
+// from the pending pool to the queue.
+//
+// Note, local transactions are never allowed to be dropped.
+func TestRepricingNoAuctioneer(t *testing.T) {
+ t.Parallel()
+
+ // Create the pool to test the pricing enforcement with
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
+
+ pool := New(testTxPoolConfig, blockchain, false)
+ pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
+ defer pool.Close()
+
+ // Keep track of transaction events to ensure all executables get announced
+ events := make(chan core.NewTxsEvent, 32)
+ sub := pool.txFeed.Subscribe(events)
+ defer sub.Unsubscribe()
+
+ // Create a number of test accounts and fund them
+ keys := make([]*ecdsa.PrivateKey, 4)
+ for i := 0; i < len(keys); i++ {
+ keys[i], _ = crypto.GenerateKey()
+ testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000))
+ }
+ // Generate and queue a batch of transactions, both pending and queued
+ txs := types.Transactions{}
+
+ txs = append(txs, pricedTransaction(0, 100000, big.NewInt(2), keys[0]))
+ txs = append(txs, pricedTransaction(1, 100000, big.NewInt(1), keys[0]))
+ txs = append(txs, pricedTransaction(2, 100000, big.NewInt(2), keys[0]))
+
+ txs = append(txs, pricedTransaction(0, 100000, big.NewInt(1), keys[1]))
+ txs = append(txs, pricedTransaction(1, 100000, big.NewInt(2), keys[1]))
+ txs = append(txs, pricedTransaction(2, 100000, big.NewInt(2), keys[1]))
+
+ txs = append(txs, pricedTransaction(1, 100000, big.NewInt(2), keys[2]))
+ txs = append(txs, pricedTransaction(2, 100000, big.NewInt(1), keys[2]))
+ txs = append(txs, pricedTransaction(3, 100000, big.NewInt(2), keys[2]))
+
+ ltx := pricedTransaction(0, 100000, big.NewInt(1), keys[3])
+
+ // Import the batch and verify that both pending and queued transactions match up
+ pool.addRemotesSync(txs)
+ pool.addLocal(ltx)
+
+ pending, queued := pool.Stats()
+ if pending != 7 {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 7)
+ }
+ if queued != 3 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 3)
+ }
+ if err := validateEvents(events, 7); err != nil {
+ t.Fatalf("original event firing failed: %v", err)
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+ // Reprice the pool and check that underpriced transactions get dropped
+ pool.SetGasTip(big.NewInt(2))
+
+ pending, queued = pool.Stats()
+ if pending != 2 {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
+ }
+ if queued != 5 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 5)
+ }
+ if err := validateEvents(events, 0); err != nil {
+ t.Fatalf("reprice event firing failed: %v", err)
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+ // Check that we can't add the old transactions back
+ if err := pool.addRemote(pricedTransaction(1, 100000, big.NewInt(1), keys[0])); !errors.Is(err, txpool.ErrUnderpriced) {
+ t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced)
+ }
+ if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); !errors.Is(err, txpool.ErrUnderpriced) {
+ t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced)
+ }
+ if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(1), keys[2])); !errors.Is(err, txpool.ErrUnderpriced) {
+ t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced)
+ }
+ if err := validateEvents(events, 0); err != nil {
+ t.Fatalf("post-reprice event firing failed: %v", err)
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+ // However we can add local underpriced transactions
+ tx := pricedTransaction(1, 100000, big.NewInt(1), keys[3])
+ if err := pool.addLocal(tx); err != nil {
+ t.Fatalf("failed to add underpriced local transaction: %v", err)
+ }
+ if pending, _ = pool.Stats(); pending != 3 {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3)
+ }
+ if err := validateEvents(events, 1); err != nil {
+ t.Fatalf("post-reprice local event firing failed: %v", err)
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+ // And we can fill gaps with properly priced transactions
+ if err := pool.addRemote(pricedTransaction(1, 100000, big.NewInt(2), keys[0])); err != nil {
+ t.Fatalf("failed to add pending transaction: %v", err)
+ }
+ if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(2), keys[1])); err != nil {
+ t.Fatalf("failed to add pending transaction: %v", err)
+ }
+ if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(2), keys[2])); err != nil {
+ t.Fatalf("failed to add queued transaction: %v", err)
+ }
+ if err := validateEvents(events, 5); err != nil {
+ t.Fatalf("post-reprice event firing failed: %v", err)
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+}
+
+func TestMinGasPriceEnforcedNoAuctioneer(t *testing.T) {
+ t.Parallel()
+
+ // Create the pool to test the pricing enforcement with
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ blockchain := newTestBlockChain(eip1559Config, 10000000, statedb, new(event.Feed))
+
+ txPoolConfig := DefaultConfig
+ txPoolConfig.NoLocals = true
+ pool := New(txPoolConfig, blockchain, false)
+ pool.Init(txPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
+ defer pool.Close()
+
+ key, _ := crypto.GenerateKey()
+ testAddBalance(pool, crypto.PubkeyToAddress(key.PublicKey), big.NewInt(1000000))
+
+ tx := pricedTransaction(0, 100000, big.NewInt(2), key)
+ pool.SetGasTip(big.NewInt(tx.GasPrice().Int64() + 1))
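+ // Raise the pool's minimum tip to just above the tx's gas price so the tx is now underpriced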
+
+ if err := pool.addLocal(tx); !errors.Is(err, txpool.ErrUnderpriced) {
+ t.Fatalf("Min tip not enforced")
+ }
+
+ if err := pool.Add([]*types.Transaction{tx}, true, false)[0]; !errors.Is(err, txpool.ErrUnderpriced) {
+ t.Fatalf("Min tip not enforced")
+ }
+
+ tx = dynamicFeeTx(0, 100000, big.NewInt(3), big.NewInt(2), key)
+ pool.SetGasTip(big.NewInt(tx.GasTipCap().Int64() + 1))
+
+ if err := pool.addLocal(tx); !errors.Is(err, txpool.ErrUnderpriced) {
+ t.Fatalf("Min tip not enforced")
+ }
+
+ if err := pool.Add([]*types.Transaction{tx}, true, false)[0]; !errors.Is(err, txpool.ErrUnderpriced) {
+ t.Fatalf("Min tip not enforced")
+ }
+ // Make sure the tx is accepted if locals are enabled
+ pool.config.NoLocals = false
+ if err := pool.Add([]*types.Transaction{tx}, true, false)[0]; err != nil {
+ t.Fatalf("Min tip enforced with locals enabled, error: %v", err)
+ }
+}
+
+// Tests that setting the transaction pool gas price to a higher value correctly
+// discards everything cheaper (legacy & dynamic fee) than that and moves any
+// gapped transactions back from the pending pool to the queue.
+//
+// Note, local transactions are never allowed to be dropped.
+func TestRepricingDynamicFeeNoAuctioneer(t *testing.T) {
+ t.Parallel()
+
+ // Create the pool to test the pricing enforcement with
+ pool, _ := setupPoolWithConfig(eip1559Config, false)
+ defer pool.Close()
+
+ // Keep track of transaction events to ensure all executables get announced
+ events := make(chan core.NewTxsEvent, 32)
+ sub := pool.txFeed.Subscribe(events)
+ defer sub.Unsubscribe()
+
+ // Create a number of test accounts and fund them
+ keys := make([]*ecdsa.PrivateKey, 4)
+ for i := 0; i < len(keys); i++ {
+ keys[i], _ = crypto.GenerateKey()
+ testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000))
+ }
+ // Generate and queue a batch of transactions, both pending and queued
+ txs := types.Transactions{}
+
+ txs = append(txs, pricedTransaction(0, 100000, big.NewInt(2), keys[0]))
+ txs = append(txs, pricedTransaction(1, 100000, big.NewInt(1), keys[0]))
+ txs = append(txs, pricedTransaction(2, 100000, big.NewInt(2), keys[0]))
+
+ txs = append(txs, dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[1]))
+ txs = append(txs, dynamicFeeTx(1, 100000, big.NewInt(3), big.NewInt(2), keys[1]))
+ txs = append(txs, dynamicFeeTx(2, 100000, big.NewInt(3), big.NewInt(2), keys[1]))
+
+ txs = append(txs, dynamicFeeTx(1, 100000, big.NewInt(2), big.NewInt(2), keys[2]))
+ txs = append(txs, dynamicFeeTx(2, 100000, big.NewInt(1), big.NewInt(1), keys[2]))
+ txs = append(txs, dynamicFeeTx(3, 100000, big.NewInt(2), big.NewInt(2), keys[2]))
+
+ ltx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[3])
+
+ // Import the batch and verify that both pending and queued transactions match up
+ pool.addRemotesSync(txs)
+ pool.addLocal(ltx)
+
+ pending, queued := pool.Stats()
+ if pending != 7 {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 7)
+ }
+ if queued != 3 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 3)
+ }
+ if err := validateEvents(events, 7); err != nil {
+ t.Fatalf("original event firing failed: %v", err)
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+ // Reprice the pool and check that underpriced transactions get dropped
+ pool.SetGasTip(big.NewInt(2))
+
+ pending, queued = pool.Stats()
+ if pending != 2 {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
+ }
+ if queued != 5 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 5)
+ }
+ if err := validateEvents(events, 0); err != nil {
+ t.Fatalf("reprice event firing failed: %v", err)
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+ // Check that we can't add the old transactions back
+ tx := pricedTransaction(1, 100000, big.NewInt(1), keys[0])
+ if err := pool.addRemote(tx); !errors.Is(err, txpool.ErrUnderpriced) {
+ t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced)
+ }
+ tx = dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[1])
+ if err := pool.addRemote(tx); !errors.Is(err, txpool.ErrUnderpriced) {
+ t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced)
+ }
+ tx = dynamicFeeTx(2, 100000, big.NewInt(1), big.NewInt(1), keys[2])
+ if err := pool.addRemote(tx); !errors.Is(err, txpool.ErrUnderpriced) {
+ t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced)
+ }
+ if err := validateEvents(events, 0); err != nil {
+ t.Fatalf("post-reprice event firing failed: %v", err)
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+ // However we can add local underpriced transactions
+ tx = dynamicFeeTx(1, 100000, big.NewInt(1), big.NewInt(1), keys[3])
+ if err := pool.addLocal(tx); err != nil {
+ t.Fatalf("failed to add underpriced local transaction: %v", err)
+ }
+ if pending, _ = pool.Stats(); pending != 3 {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3)
+ }
+ if err := validateEvents(events, 1); err != nil {
+ t.Fatalf("post-reprice local event firing failed: %v", err)
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+ // And we can fill gaps with properly priced transactions
+ tx = pricedTransaction(1, 100000, big.NewInt(2), keys[0])
+ if err := pool.addRemote(tx); err != nil {
+ t.Fatalf("failed to add pending transaction: %v", err)
+ }
+ tx = dynamicFeeTx(0, 100000, big.NewInt(3), big.NewInt(2), keys[1])
+ if err := pool.addRemote(tx); err != nil {
+ t.Fatalf("failed to add pending transaction: %v", err)
+ }
+ tx = dynamicFeeTx(2, 100000, big.NewInt(2), big.NewInt(2), keys[2])
+ if err := pool.addRemoteSync(tx); err != nil {
+ t.Fatalf("failed to add queued transaction: %v", err)
+ }
+ if err := validateEvents(events, 5); err != nil {
+ t.Fatalf("post-reprice event firing failed: %v", err)
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+}
+
+// Tests that setting the transaction pool gas price to a higher value does not
+// remove local transactions (legacy & dynamic fee).
+func TestRepricingKeepsLocalsNoAuctioneer(t *testing.T) {
+ t.Parallel()
+
+ // Create the pool to test the pricing enforcement with
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed))
+
+ pool := New(testTxPoolConfig, blockchain, false)
+ pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
+ defer pool.Close()
+
+ // Create a number of test accounts and fund them
+ keys := make([]*ecdsa.PrivateKey, 3)
+ for i := 0; i < len(keys); i++ {
+ keys[i], _ = crypto.GenerateKey()
+ testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(100000*1000000))
+ }
+ // Create transactions (both pending and queued) with a linearly growing gas price
+ for i := uint64(0); i < 500; i++ {
+ // Add pending transaction.
+ pendingTx := pricedTransaction(i, 100000, big.NewInt(int64(i)), keys[2])
+ if err := pool.addLocal(pendingTx); err != nil {
+ t.Fatal(err)
+ }
+ // Add queued transaction.
+ queuedTx := pricedTransaction(i+501, 100000, big.NewInt(int64(i)), keys[2])
+ if err := pool.addLocal(queuedTx); err != nil {
+ t.Fatal(err)
+ }
+
+ // Add pending dynamic fee transaction.
+ pendingTx = dynamicFeeTx(i, 100000, big.NewInt(int64(i)+1), big.NewInt(int64(i)), keys[1])
+ if err := pool.addLocal(pendingTx); err != nil {
+ t.Fatal(err)
+ }
+ // Add queued dynamic fee transaction.
+ queuedTx = dynamicFeeTx(i+501, 100000, big.NewInt(int64(i)+1), big.NewInt(int64(i)), keys[1])
+ if err := pool.addLocal(queuedTx); err != nil {
+ t.Fatal(err)
+ }
+ }
+ pending, queued := pool.Stats()
+ expPending, expQueued := 1000, 1000
+ validate := func() {
+ pending, queued = pool.Stats()
+ if pending != expPending {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, expPending)
+ }
+ if queued != expQueued {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, expQueued)
+ }
+
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+ }
+ validate()
+
+ // Reprice the pool and check that nothing is dropped
+ pool.SetGasTip(big.NewInt(2))
+ validate()
+
+ pool.SetGasTip(big.NewInt(2))
+ pool.SetGasTip(big.NewInt(4))
+ pool.SetGasTip(big.NewInt(8))
+ pool.SetGasTip(big.NewInt(100))
+ validate()
+}
+
+// Tests that when the pool reaches its global transaction limit, underpriced
+// transactions are gradually shifted out for more expensive ones and any gapped
+// pending transactions are moved into the queue.
+//
+// Note, local transactions are never allowed to be dropped.
+func TestUnderpricingNoAuctioneer(t *testing.T) {
+ t.Parallel()
+
+ // Create the pool to test the pricing enforcement with
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
+
+ config := testTxPoolConfig
+ config.GlobalSlots = 2
+ config.GlobalQueue = 2
+
+ pool := New(config, blockchain, false)
+ pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
+ defer pool.Close()
+
+ // Keep track of transaction events to ensure all executables get announced
+ events := make(chan core.NewTxsEvent, 32)
+ sub := pool.txFeed.Subscribe(events)
+ defer sub.Unsubscribe()
+
+ // Create a number of test accounts and fund them
+ keys := make([]*ecdsa.PrivateKey, 5)
+ for i := 0; i < len(keys); i++ {
+ keys[i], _ = crypto.GenerateKey()
+ testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000))
+ }
+ // Generate and queue a batch of transactions, both pending and queued
+ txs := types.Transactions{}
+
+ txs = append(txs, pricedTransaction(0, 100000, big.NewInt(1), keys[0]))
+ txs = append(txs, pricedTransaction(1, 100000, big.NewInt(2), keys[0]))
+
+ txs = append(txs, pricedTransaction(1, 100000, big.NewInt(1), keys[1]))
+
+ ltx := pricedTransaction(0, 100000, big.NewInt(1), keys[2])
+
+ // Import the batch and verify that both pending and queued transactions match up
+ pool.addRemotes(txs)
+ pool.addLocal(ltx)
+
+ pending, queued := pool.Stats()
+ if pending != 3 {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3)
+ }
+ if queued != 1 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1)
+ }
+ if err := validateEvents(events, 3); err != nil {
+ t.Fatalf("original event firing failed: %v", err)
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+ // Ensure that adding an underpriced transaction on block limit fails
+ if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); !errors.Is(err, txpool.ErrUnderpriced) {
+ t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced)
+ }
+ // Replace a future transaction with a future transaction
+ if err := pool.addRemoteSync(pricedTransaction(1, 100000, big.NewInt(2), keys[1])); err != nil { // +K1:1 => -K1:1 => Pend K0:0, K0:1, K2:0; Que K1:1
+ t.Fatalf("failed to add well priced transaction: %v", err)
+ }
+ // Ensure that adding high priced transactions drops cheap ones, but not own
+ if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(3), keys[1])); err != nil { // +K1:0 => -K1:1 => Pend K0:0, K0:1, K1:0, K2:0; Que -
+ t.Fatalf("failed to add well priced transaction: %v", err)
+ }
+ if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(4), keys[1])); err != nil { // +K1:2 => -K0:0 => Pend K1:0, K2:0; Que K0:1 K1:2
+ t.Fatalf("failed to add well priced transaction: %v", err)
+ }
+ if err := pool.addRemote(pricedTransaction(3, 100000, big.NewInt(5), keys[1])); err != nil { // +K1:3 => -K0:1 => Pend K1:0, K2:0; Que K1:2 K1:3
+ t.Fatalf("failed to add well priced transaction: %v", err)
+ }
+ // Ensure that replacing a pending transaction with a future transaction fails
+ if err := pool.addRemote(pricedTransaction(5, 100000, big.NewInt(6), keys[1])); err != txpool.ErrFutureReplacePending {
+ t.Fatalf("adding future replace transaction error mismatch: have %v, want %v", err, txpool.ErrFutureReplacePending)
+ }
+ pending, queued = pool.Stats()
+ if pending != 2 {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
+ }
+ if queued != 2 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2)
+ }
+ if err := validateEvents(events, 2); err != nil {
+ t.Fatalf("additional event firing failed: %v", err)
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+ // Ensure that adding local transactions can push out even higher priced ones
+ ltx = pricedTransaction(1, 100000, big.NewInt(0), keys[2])
+ if err := pool.addLocal(ltx); err != nil {
+ t.Fatalf("failed to append underpriced local transaction: %v", err)
+ }
+ ltx = pricedTransaction(0, 100000, big.NewInt(0), keys[3])
+ if err := pool.addLocal(ltx); err != nil {
+ t.Fatalf("failed to add new underpriced local transaction: %v", err)
+ }
+ pending, queued = pool.Stats()
+ if pending != 3 {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3)
+ }
+ if queued != 1 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1)
+ }
+ if err := validateEvents(events, 2); err != nil {
+ t.Fatalf("local event firing failed: %v", err)
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+}
+
+// Tests that more expensive transactions push out cheap ones from the pool, but
+// without producing instability by creating gaps that start jumping transactions
+// back and forth between queued/pending.
+func TestStableUnderpricingNoAuctioneer(t *testing.T) {
+ t.Parallel()
+
+ // Create the pool to test the pricing enforcement with
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
+
+ config := testTxPoolConfig
+ config.GlobalSlots = 128
+ config.GlobalQueue = 0
+
+ pool := New(config, blockchain, false)
+ pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
+ defer pool.Close()
+
+ // Keep track of transaction events to ensure all executables get announced
+ events := make(chan core.NewTxsEvent, 32)
+ sub := pool.txFeed.Subscribe(events)
+ defer sub.Unsubscribe()
+
+ // Create a number of test accounts and fund them
+ keys := make([]*ecdsa.PrivateKey, 2)
+ for i := 0; i < len(keys); i++ {
+ keys[i], _ = crypto.GenerateKey()
+ testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000))
+ }
+ // Fill up the entire queue with the same transaction price points
+ txs := types.Transactions{}
+ for i := uint64(0); i < config.GlobalSlots; i++ {
+ txs = append(txs, pricedTransaction(i, 100000, big.NewInt(1), keys[0]))
+ }
+ pool.addRemotesSync(txs)
+
+ pending, queued := pool.Stats()
+ if pending != int(config.GlobalSlots) {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, config.GlobalSlots)
+ }
+ if queued != 0 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
+ }
+ if err := validateEvents(events, int(config.GlobalSlots)); err != nil {
+ t.Fatalf("original event firing failed: %v", err)
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+ // Ensure that adding a high priced transaction drops a cheap one, but doesn't produce a gap
+ if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(3), keys[1])); err != nil {
+ t.Fatalf("failed to add well priced transaction: %v", err)
+ }
+ pending, queued = pool.Stats()
+ if pending != int(config.GlobalSlots) {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, config.GlobalSlots)
+ }
+ if queued != 0 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
+ }
+ if err := validateEvents(events, 1); err != nil {
+ t.Fatalf("additional event firing failed: %v", err)
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+}
+
+// Tests that when the pool reaches its global transaction limit, underpriced
+// transactions (legacy & dynamic fee) are gradually shifted out for more
+// expensive ones and any gapped pending transactions are moved into the queue.
+//
+// Note, local transactions are never allowed to be dropped.
+func TestUnderpricingDynamicFeeNoAuctioneer(t *testing.T) {
+ t.Parallel()
+
+ pool, _ := setupPoolWithConfig(eip1559Config, false)
+ defer pool.Close()
+
+ pool.config.GlobalSlots = 2
+ pool.config.GlobalQueue = 2
+
+ // Keep track of transaction events to ensure all executables get announced
+ events := make(chan core.NewTxsEvent, 32)
+ sub := pool.txFeed.Subscribe(events)
+ defer sub.Unsubscribe()
+
+ // Create a number of test accounts and fund them
+ keys := make([]*ecdsa.PrivateKey, 4)
+ for i := 0; i < len(keys); i++ {
+ keys[i], _ = crypto.GenerateKey()
+ testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000))
+ }
+
+ // Generate and queue a batch of transactions, both pending and queued
+ txs := types.Transactions{}
+
+ txs = append(txs, dynamicFeeTx(0, 100000, big.NewInt(3), big.NewInt(2), keys[0]))
+ txs = append(txs, pricedTransaction(1, 100000, big.NewInt(2), keys[0]))
+ txs = append(txs, dynamicFeeTx(1, 100000, big.NewInt(2), big.NewInt(1), keys[1]))
+
+ ltx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[2])
+
+ // Import the batch and verify that both pending and queued transactions match up
+ pool.addRemotes(txs) // Pend K0:0, K0:1; Que K1:1
+ pool.addLocal(ltx) // +K2:0 => Pend K0:0, K0:1, K2:0; Que K1:1
+
+ pending, queued := pool.Stats()
+ if pending != 3 {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3)
+ }
+ if queued != 1 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1)
+ }
+ if err := validateEvents(events, 3); err != nil {
+ t.Fatalf("original event firing failed: %v", err)
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+
+ // Ensure that adding an underpriced transaction fails
+ tx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[1])
+ if err := pool.addRemote(tx); !errors.Is(err, txpool.ErrUnderpriced) { // Pend K0:0, K0:1, K2:0; Que K1:1
+ t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced)
+ }
+
+ // Ensure that adding high priced transactions drops cheap ones, but not own
+ tx = pricedTransaction(0, 100000, big.NewInt(2), keys[1])
+ if err := pool.addRemote(tx); err != nil { // +K1:0, -K1:1 => Pend K0:0, K0:1, K1:0, K2:0; Que -
+ t.Fatalf("failed to add well priced transaction: %v", err)
+ }
+
+ tx = pricedTransaction(1, 100000, big.NewInt(3), keys[1])
+ if err := pool.addRemoteSync(tx); err != nil { // +K1:2, -K0:1 => Pend K0:0 K1:0, K2:0; Que K1:2
+ t.Fatalf("failed to add well priced transaction: %v", err)
+ }
+ tx = dynamicFeeTx(2, 100000, big.NewInt(4), big.NewInt(1), keys[1])
+ if err := pool.addRemoteSync(tx); err != nil { // +K1:3, -K1:0 => Pend K0:0 K2:0; Que K1:2 K1:3
+ t.Fatalf("failed to add well priced transaction: %v", err)
+ }
+ pending, queued = pool.Stats()
+ if pending != 2 {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
+ }
+ if queued != 2 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2)
+ }
+ if err := validateEvents(events, 2); err != nil {
+ t.Fatalf("additional event firing failed: %v", err)
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+ // Ensure that adding local transactions can push out even higher priced ones
+ ltx = dynamicFeeTx(1, 100000, big.NewInt(0), big.NewInt(0), keys[2])
+ if err := pool.addLocal(ltx); err != nil {
+ t.Fatalf("failed to append underpriced local transaction: %v", err)
+ }
+ ltx = dynamicFeeTx(0, 100000, big.NewInt(0), big.NewInt(0), keys[3])
+ if err := pool.addLocal(ltx); err != nil {
+ t.Fatalf("failed to add new underpriced local transaction: %v", err)
+ }
+ pending, queued = pool.Stats()
+ if pending != 3 {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3)
+ }
+ if queued != 1 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1)
+ }
+ if err := validateEvents(events, 2); err != nil {
+ t.Fatalf("local event firing failed: %v", err)
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+}
+
+// Tests whether the highest fee cap transaction is retained after a batch of high
+// effective tip transactions is added, and vice versa.
+func TestDualHeapEvictionNoAuctioneer(t *testing.T) {
+ t.Parallel()
+
+ pool, _ := setupPoolWithConfig(eip1559Config, false)
+ defer pool.Close()
+
+ pool.config.GlobalSlots = 10
+ pool.config.GlobalQueue = 10
+
+ var (
+ highTip, highCap *types.Transaction
+ baseFee int
+ )
+
+ check := func(tx *types.Transaction, name string) {
+ if pool.all.GetRemote(tx.Hash()) == nil {
+ t.Fatalf("highest %s transaction evicted from the pool", name)
+ }
+ }
+
+ add := func(urgent bool) {
+ for i := 0; i < 20; i++ {
+ var tx *types.Transaction
+ // Create a test account and fund it
+ key, _ := crypto.GenerateKey()
+ testAddBalance(pool, crypto.PubkeyToAddress(key.PublicKey), big.NewInt(1000000000000))
+ if urgent {
+ tx = dynamicFeeTx(0, 100000, big.NewInt(int64(baseFee+1+i)), big.NewInt(int64(1+i)), key)
+ highTip = tx
+ } else {
+ tx = dynamicFeeTx(0, 100000, big.NewInt(int64(baseFee+200+i)), big.NewInt(1), key)
+ highCap = tx
+ }
+ pool.addRemotesSync([]*types.Transaction{tx})
+ }
+ pending, queued := pool.Stats()
+ if pending+queued != 20 {
+ t.Fatalf("transaction count mismatch: have %d, want %d", pending+queued, 10)
+ }
+ }
+
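+ // Alternate batches of high-fee-cap and high-effective-tip transactions while raising the base fee; the dual heaps must always retain the best transaction by each metric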
+ add(false)
+ for baseFee = 0; baseFee <= 1000; baseFee += 100 {
+ pool.priced.SetBaseFee(big.NewInt(int64(baseFee)))
+ add(true)
+ check(highCap, "fee cap")
+ add(false)
+ check(highTip, "effective tip")
+ }
+
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+}
+
+// Tests that the pool rejects duplicate transactions.
+func TestDeduplicationNoAuctioneer(t *testing.T) {
+ t.Parallel()
+
+ // Create the pool to test the pricing enforcement with
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
+
+ pool := New(testTxPoolConfig, blockchain, false)
+ pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
+ defer pool.Close()
+
+ // Create a test account to add transactions with
+ key, _ := crypto.GenerateKey()
+ testAddBalance(pool, crypto.PubkeyToAddress(key.PublicKey), big.NewInt(1000000000))
+
+ // Create a batch of transactions and add a few of them
+ txs := make([]*types.Transaction, 16)
+ for i := 0; i < len(txs); i++ {
+ txs[i] = pricedTransaction(uint64(i), 100000, big.NewInt(1), key)
+ }
+ var firsts []*types.Transaction
+ for i := 0; i < len(txs); i += 2 {
+ firsts = append(firsts, txs[i])
+ }
+ errs := pool.addRemotesSync(firsts)
+ if len(errs) != len(firsts) {
+ t.Fatalf("first add mismatching result count: have %d, want %d", len(errs), len(firsts))
+ }
+ for i, err := range errs {
+ if err != nil {
+ t.Errorf("add %d failed: %v", i, err)
+ }
+ }
+ pending, queued := pool.Stats()
+ if pending != 1 {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 1)
+ }
+ if queued != len(txs)/2-1 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, len(txs)/2-1)
+ }
+ // Try to add all of them now and ensure previous ones error out as knowns
+ errs = pool.addRemotesSync(txs)
+ if len(errs) != len(txs) {
+ t.Fatalf("all add mismatching result count: have %d, want %d", len(errs), len(txs))
+ }
+ for i, err := range errs {
+ if i%2 == 0 && err == nil {
+ t.Errorf("add %d succeeded, should have failed as known", i)
+ }
+ if i%2 == 1 && err != nil {
+ t.Errorf("add %d failed: %v", i, err)
+ }
+ }
+ pending, queued = pool.Stats()
+ if pending != len(txs) {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, len(txs))
+ }
+ if queued != 0 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+}
+
+// Tests that the pool rejects replacement transactions that don't meet the minimum
+// price bump required.
+func TestReplacementNoAuctioneer(t *testing.T) {
+ t.Parallel()
+
+ // Create the pool to test the pricing enforcement with
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
+
+ pool := New(testTxPoolConfig, blockchain, false)
+ pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
+ defer pool.Close()
+
+ // Keep track of transaction events to ensure all executables get announced
+ events := make(chan core.NewTxsEvent, 32)
+ sub := pool.txFeed.Subscribe(events)
+ defer sub.Unsubscribe()
+
+ // Create a test account to add transactions with
+ key, _ := crypto.GenerateKey()
+ testAddBalance(pool, crypto.PubkeyToAddress(key.PublicKey), big.NewInt(1000000000))
+
+ // Add pending transactions, ensuring the minimum price bump is enforced for replacement (for ultra low prices too)
+ price := int64(100)
+ threshold := (price * (100 + int64(testTxPoolConfig.PriceBump))) / 100
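+ // With testTxPoolConfig's PriceBump (10% in the upstream defaults), the 100 wei tx must be replaced by at least 110 wei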
+
+ if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), key)); err != nil {
+ t.Fatalf("failed to add original cheap pending transaction: %v", err)
+ }
+ if err := pool.addRemote(pricedTransaction(0, 100001, big.NewInt(1), key)); err != txpool.ErrReplaceUnderpriced {
+ t.Fatalf("original cheap pending transaction replacement error mismatch: have %v, want %v", err, txpool.ErrReplaceUnderpriced)
+ }
+ if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(2), key)); err != nil {
+ t.Fatalf("failed to replace original cheap pending transaction: %v", err)
+ }
+ if err := validateEvents(events, 2); err != nil {
+ t.Fatalf("cheap replacement event firing failed: %v", err)
+ }
+
+ if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(price), key)); err != nil {
+ t.Fatalf("failed to add original proper pending transaction: %v", err)
+ }
+ if err := pool.addRemote(pricedTransaction(0, 100001, big.NewInt(threshold-1), key)); err != txpool.ErrReplaceUnderpriced {
+ t.Fatalf("original proper pending transaction replacement error mismatch: have %v, want %v", err, txpool.ErrReplaceUnderpriced)
+ }
+ if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(threshold), key)); err != nil {
+ t.Fatalf("failed to replace original proper pending transaction: %v", err)
+ }
+ if err := validateEvents(events, 2); err != nil {
+ t.Fatalf("proper replacement event firing failed: %v", err)
+ }
+
+ // Add queued transactions, ensuring the minimum price bump is enforced for replacement (for ultra low prices too)
+ if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(1), key)); err != nil {
+ t.Fatalf("failed to add original cheap queued transaction: %v", err)
+ }
+ if err := pool.addRemote(pricedTransaction(2, 100001, big.NewInt(1), key)); err != txpool.ErrReplaceUnderpriced {
+ t.Fatalf("original cheap queued transaction replacement error mismatch: have %v, want %v", err, txpool.ErrReplaceUnderpriced)
+ }
+ if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(2), key)); err != nil {
+ t.Fatalf("failed to replace original cheap queued transaction: %v", err)
+ }
+
+ if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(price), key)); err != nil {
+ t.Fatalf("failed to add original proper queued transaction: %v", err)
+ }
+ if err := pool.addRemote(pricedTransaction(2, 100001, big.NewInt(threshold-1), key)); err != txpool.ErrReplaceUnderpriced {
+ t.Fatalf("original proper queued transaction replacement error mismatch: have %v, want %v", err, txpool.ErrReplaceUnderpriced)
+ }
+ if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(threshold), key)); err != nil {
+ t.Fatalf("failed to replace original proper queued transaction: %v", err)
+ }
+
+ if err := validateEvents(events, 0); err != nil {
+ t.Fatalf("queued replacement event firing failed: %v", err)
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+}
+
+// Tests that the pool rejects replacement dynamic fee transactions that don't
+// meet the minimum price bump required.
+func TestReplacementDynamicFeeNoAuctioneer(t *testing.T) {
+ t.Parallel()
+
+ // Create the pool to test the pricing enforcement with
+ pool, key := setupPoolWithConfig(eip1559Config, false)
+ defer pool.Close()
+ testAddBalance(pool, crypto.PubkeyToAddress(key.PublicKey), big.NewInt(1000000000))
+
+ // Keep track of transaction events to ensure all executables get announced
+ events := make(chan core.NewTxsEvent, 32)
+ sub := pool.txFeed.Subscribe(events)
+ defer sub.Unsubscribe()
+
+ // Add pending transactions, ensuring the minimum price bump is enforced for replacement (for ultra low prices too)
+ gasFeeCap := int64(100)
+ feeCapThreshold := (gasFeeCap * (100 + int64(testTxPoolConfig.PriceBump))) / 100
+ gasTipCap := int64(60)
+ tipThreshold := (gasTipCap * (100 + int64(testTxPoolConfig.PriceBump))) / 100
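+ // Assuming the default 10% PriceBump, these thresholds work out to 110 (fee cap) and 66 (tip)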
+
+ // Run the following identical checks for both the pending and queue pools:
+ // 1. Send initial tx => accept
+ // 2. Don't bump tip or fee cap => discard
+ // 3. Bump both more than min => accept
+ // 4. Check events match expected (2 new executable txs during pending, 0 during queue)
+ // 5. Send new tx with larger tip and gasFeeCap => accept
+ // 6. Bump tip max allowed so it's still underpriced => discard
+ // 7. Bump fee cap max allowed so it's still underpriced => discard
+ // 8. Bump tip min for acceptance => discard
+ // 9. Bump feecap min for acceptance => discard
+ // 10. Bump feecap and tip min for acceptance => accept
+ // 11. Check events match expected (2 new executable txs during pending, 0 during queue)
+ stages := []string{"pending", "queued"}
+ for _, stage := range stages {
+ // Since state is empty, 0 nonce txs are "executable" and can go
+ // into pending immediately. 2 nonce txs are "gapped"
+ nonce := uint64(0)
+ if stage == "queued" {
+ nonce = 2
+ }
+
+ // 1. Send initial tx => accept
+ tx := dynamicFeeTx(nonce, 100000, big.NewInt(2), big.NewInt(1), key)
+ if err := pool.addRemoteSync(tx); err != nil {
+ t.Fatalf("failed to add original cheap %s transaction: %v", stage, err)
+ }
+ // 2. Don't bump tip or feecap => discard
+ tx = dynamicFeeTx(nonce, 100001, big.NewInt(2), big.NewInt(1), key)
+ if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced {
+ t.Fatalf("original cheap %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced)
+ }
+ // 3. Bump both more than min => accept
+ tx = dynamicFeeTx(nonce, 100000, big.NewInt(3), big.NewInt(2), key)
+ if err := pool.addRemote(tx); err != nil {
+ t.Fatalf("failed to replace original cheap %s transaction: %v", stage, err)
+ }
+ // 4. Check events match expected (2 new executable txs during pending, 0 during queue)
+ count := 2
+ if stage == "queued" {
+ count = 0
+ }
+ if err := validateEvents(events, count); err != nil {
+ t.Fatalf("cheap %s replacement event firing failed: %v", stage, err)
+ }
+ // 5. Send new tx with larger tip and feeCap => accept
+ tx = dynamicFeeTx(nonce, 100000, big.NewInt(gasFeeCap), big.NewInt(gasTipCap), key)
+ if err := pool.addRemoteSync(tx); err != nil {
+ t.Fatalf("failed to add original proper %s transaction: %v", stage, err)
+ }
+ // 6. Bump tip max allowed so it's still underpriced => discard
+ tx = dynamicFeeTx(nonce, 100000, big.NewInt(gasFeeCap), big.NewInt(tipThreshold-1), key)
+ if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced {
+ t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced)
+ }
+ // 7. Bump fee cap max allowed so it's still underpriced => discard
+ tx = dynamicFeeTx(nonce, 100000, big.NewInt(feeCapThreshold-1), big.NewInt(gasTipCap), key)
+ if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced {
+ t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced)
+ }
+ // 8. Bump tip min for acceptance => discard
+ tx = dynamicFeeTx(nonce, 100000, big.NewInt(gasFeeCap), big.NewInt(tipThreshold), key)
+ if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced {
+ t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced)
+ }
+ // 9. Bump fee cap min for acceptance => discard
+ tx = dynamicFeeTx(nonce, 100000, big.NewInt(feeCapThreshold), big.NewInt(gasTipCap), key)
+ if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced {
+ t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced)
+ }
+ // 10. Bump fee cap and tip min for acceptance => accept
+ tx = dynamicFeeTx(nonce, 100000, big.NewInt(feeCapThreshold), big.NewInt(tipThreshold), key)
+ if err := pool.addRemote(tx); err != nil {
+ t.Fatalf("failed to replace original cheap %s transaction: %v", stage, err)
+ }
+ // 11. Check events match expected (2 new executable txs during pending, 0 during queue)
+ count = 2
+ if stage == "queued" {
+ count = 0
+ }
+ if err := validateEvents(events, count); err != nil {
+ t.Fatalf("replacement %s event firing failed: %v", stage, err)
+ }
+ }
+
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+}
+
+// Tests that local transactions are journaled to disk, but remote transactions
+// get discarded between restarts.
+func TestJournalingNoAuctioneer(t *testing.T) { testJournalingNoAuctioneer(t, false) }
+func TestJournalingNoLocalsNoAuctioneer(t *testing.T) { testJournalingNoAuctioneer(t, true) }
+
+func testJournalingNoAuctioneer(t *testing.T, nolocals bool) {
+ t.Parallel()
+
+ // Create a temporary file for the journal
+ file, err := os.CreateTemp("", "")
+ if err != nil {
+ t.Fatalf("failed to create temporary journal: %v", err)
+ }
+ journal := file.Name()
+ defer os.Remove(journal)
+
+ // Clean up the temporary file; we only need the path for now
+ file.Close()
+ os.Remove(journal)
+
+ // Create the original pool to inject transaction into the journal
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
+
+ config := testTxPoolConfig
+ config.NoLocals = nolocals
+ config.Journal = journal
+ config.Rejournal = time.Second
+
+ pool := New(config, blockchain, false)
+ pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
+
+ // Create two test accounts to ensure remotes expire but locals do not
+ local, _ := crypto.GenerateKey()
+ remote, _ := crypto.GenerateKey()
+
+ testAddBalance(pool, crypto.PubkeyToAddress(local.PublicKey), big.NewInt(1000000000))
+ testAddBalance(pool, crypto.PubkeyToAddress(remote.PublicKey), big.NewInt(1000000000))
+
+ // Add three local and a remote transactions and ensure they are queued up
+ if err := pool.addLocal(pricedTransaction(0, 100000, big.NewInt(1), local)); err != nil {
+ t.Fatalf("failed to add local transaction: %v", err)
+ }
+ if err := pool.addLocal(pricedTransaction(1, 100000, big.NewInt(1), local)); err != nil {
+ t.Fatalf("failed to add local transaction: %v", err)
+ }
+ if err := pool.addLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil {
+ t.Fatalf("failed to add local transaction: %v", err)
+ }
+ if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), remote)); err != nil {
+ t.Fatalf("failed to add remote transaction: %v", err)
+ }
+ pending, queued := pool.Stats()
+ if pending != 4 {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 4)
+ }
+ if queued != 0 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+ // Terminate the old pool, bump the local nonce, create a new pool and ensure relevant transaction survive
+ pool.Close()
+ statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1)
+ blockchain = newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
+
+ pool = New(config, blockchain, false)
+ pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
+
+ pending, queued = pool.Stats()
+ if queued != 0 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
+ }
+ if nolocals {
+ if pending != 0 {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0)
+ }
+ } else {
+ if pending != 2 {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
+ }
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+ // Bump the nonce temporarily and ensure the newly invalidated transaction is removed
+ statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 2)
+ <-pool.requestReset(nil, nil)
+ time.Sleep(2 * config.Rejournal)
+ pool.Close()
+
+ statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1)
+ blockchain = newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
+ pool = New(config, blockchain, false)
+ pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
+
+ pending, queued = pool.Stats()
+ if pending != 0 {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0)
+ }
+ if nolocals {
+ if queued != 0 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
+ }
+ } else {
+ if queued != 1 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1)
+ }
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+ pool.Close()
+}
+
+// TestStatusCheckNoAuctioneer tests that the pool can correctly retrieve the
+// pending status of individual transactions.
+func TestStatusCheckNoAuctioneer(t *testing.T) {
+ t.Parallel()
+
+ // Create the pool to test the status retrievals with
+ statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
+ blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
+
+ pool := New(testTxPoolConfig, blockchain, false)
+ pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
+ defer pool.Close()
+
+ // Create the test accounts to check various transaction statuses with
+ keys := make([]*ecdsa.PrivateKey, 3)
+ for i := 0; i < len(keys); i++ {
+ keys[i], _ = crypto.GenerateKey()
+ testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000))
+ }
+ // Generate and queue a batch of transactions, both pending and queued
+ txs := types.Transactions{}
+
+ txs = append(txs, pricedTransaction(0, 100000, big.NewInt(1), keys[0])) // Pending only
+ txs = append(txs, pricedTransaction(0, 100000, big.NewInt(1), keys[1])) // Pending and queued
+ txs = append(txs, pricedTransaction(2, 100000, big.NewInt(1), keys[1]))
+ txs = append(txs, pricedTransaction(2, 100000, big.NewInt(1), keys[2])) // Queued only
+
+ // Import the transactions and ensure they are correctly added
+ pool.addRemotesSync(txs)
+
+ pending, queued := pool.Stats()
+ if pending != 2 {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
+ }
+ if queued != 2 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2)
+ }
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
+ }
+ // Retrieve the status of each transaction and validate them
+ hashes := make([]common.Hash, len(txs))
+ for i, tx := range txs {
+ hashes[i] = tx.Hash()
+ }
+ hashes = append(hashes, common.Hash{})
+ expect := []txpool.TxStatus{txpool.TxStatusPending, txpool.TxStatusPending, txpool.TxStatusQueued, txpool.TxStatusQueued, txpool.TxStatusUnknown}
+
+ for i := 0; i < len(hashes); i++ {
+ if status := pool.Status(hashes[i]); status != expect[i] {
+ t.Errorf("transaction %d: status mismatch: have %v, want %v", i, status, expect[i])
+ }
+ }
+}
+
+// Tests that transaction slot consumption is computed correctly
+func TestSlotCountNoAuctioneer(t *testing.T) {
+ t.Parallel()
+
+ key, _ := crypto.GenerateKey()
+
+ // Check that an empty transaction consumes a single slot
+ smallTx := pricedDataTransaction(0, 0, big.NewInt(0), key, 0)
+ if slots := numSlots(smallTx); slots != 1 {
+ t.Fatalf("small transactions slot count mismatch: have %d want %d", slots, 1)
+ }
+ // Check that a large transaction consumes the correct number of slots
+ bigTx := pricedDataTransaction(0, 0, big.NewInt(0), key, uint64(10*txSlotSize))
+ if slots := numSlots(bigTx); slots != 11 {
+ t.Fatalf("big transactions slot count mismatch: have %d want %d", slots, 11)
+ }
+}
+
+// Benchmarks the speed of validating the contents of the pending queue of the
+// transaction pool.
+func BenchmarkPendingDemotion100NoAuctioneer(b *testing.B) {
+ benchmarkPendingDemotionNoAuctioneer(b, 100)
+}
+func BenchmarkPendingDemotion1000NoAuctioneer(b *testing.B) {
+ benchmarkPendingDemotionNoAuctioneer(b, 1000)
+}
+func BenchmarkPendingDemotion10000NoAuctioneer(b *testing.B) {
+ benchmarkPendingDemotionNoAuctioneer(b, 10000)
+}
+
+func benchmarkPendingDemotionNoAuctioneer(b *testing.B, size int) {
+ // Add a batch of transactions to a pool one by one
+ pool, key := setupPool(false)
+ defer pool.Close()
+
+ account := crypto.PubkeyToAddress(key.PublicKey)
+ testAddBalance(pool, account, big.NewInt(1000000))
+
+ for i := 0; i < size; i++ {
+ tx := transaction(uint64(i), 100000, key)
+ pool.promoteTx(account, tx.Hash(), tx)
+ }
+ // Benchmark the speed of pool validation
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ pool.demoteUnexecutables()
+ }
+}
+
+// Benchmarks the speed of scheduling the contents of the future queue of the
+// transaction pool.
+func BenchmarkFuturePromotion100NoAuctioneer(b *testing.B) {
+ benchmarkFuturePromotionNoAuctioneer(b, 100)
+}
+func BenchmarkFuturePromotion1000NoAuctioneer(b *testing.B) {
+ benchmarkFuturePromotionNoAuctioneer(b, 1000)
+}
+func BenchmarkFuturePromotion10000NoAuctioneer(b *testing.B) {
+ benchmarkFuturePromotionNoAuctioneer(b, 10000)
+}
+
+func benchmarkFuturePromotionNoAuctioneer(b *testing.B, size int) {
+ // Add a batch of transactions to a pool one by one
+ pool, key := setupPool(false)
+ defer pool.Close()
+
+ account := crypto.PubkeyToAddress(key.PublicKey)
+ testAddBalance(pool, account, big.NewInt(1000000))
+
+ for i := 0; i < size; i++ {
+ tx := transaction(uint64(1+i), 100000, key)
+ pool.enqueueTx(tx.Hash(), tx, false, true)
+ }
+ // Benchmark the speed of pool validation
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ pool.promoteExecutables(nil)
+ }
+}
+
+// Benchmarks the speed of batched transaction insertion.
+func BenchmarkBatchInsert100NoAuctioneer(b *testing.B) {
+ benchmarkBatchInsertNoAuctioneer(b, 100, false)
+}
+func BenchmarkBatchInsert1000NoAuctioneer(b *testing.B) {
+ benchmarkBatchInsertNoAuctioneer(b, 1000, false)
+}
+func BenchmarkBatchInsert10000NoAuctioneer(b *testing.B) {
+ benchmarkBatchInsertNoAuctioneer(b, 10000, false)
+}
+
+func BenchmarkBatchLocalInsert100NoAuctioneer(b *testing.B) {
+ benchmarkBatchInsertNoAuctioneer(b, 100, true)
+}
+func BenchmarkBatchLocalInsert1000NoAuctioneer(b *testing.B) {
+ benchmarkBatchInsertNoAuctioneer(b, 1000, true)
+}
+func BenchmarkBatchLocalInsert10000NoAuctioneer(b *testing.B) {
+ benchmarkBatchInsertNoAuctioneer(b, 10000, true)
+}
+
+func benchmarkBatchInsertNoAuctioneer(b *testing.B, size int, local bool) {
+ // Generate a batch of transactions to enqueue into the pool
+ pool, key := setupPool(false)
+ defer pool.Close()
+
+ account := crypto.PubkeyToAddress(key.PublicKey)
+ testAddBalance(pool, account, big.NewInt(1000000000000000000))
+
+ batches := make([]types.Transactions, b.N)
+ for i := 0; i < b.N; i++ {
+ batches[i] = make(types.Transactions, size)
+ for j := 0; j < size; j++ {
+ batches[i][j] = transaction(uint64(size*i+j), 100000, key)
+ }
+ }
+ // Benchmark importing the transactions into the queue
+ b.ResetTimer()
+ for _, batch := range batches {
+ if local {
+ pool.addLocals(batch)
+ } else {
+ pool.addRemotes(batch)
+ }
+ }
+}
+
+func BenchmarkInsertRemoteWithAllLocalsNoAuctioneer(b *testing.B) {
+ // Allocate keys for testing
+ key, _ := crypto.GenerateKey()
+ account := crypto.PubkeyToAddress(key.PublicKey)
+
+ remoteKey, _ := crypto.GenerateKey()
+ remoteAddr := crypto.PubkeyToAddress(remoteKey.PublicKey)
+
+ locals := make([]*types.Transaction, 4096+1024) // Occupy all slots
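+ // 4096+1024 matches the default GlobalSlots+GlobalQueue, so the local transactions fill the pool completely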
+ for i := 0; i < len(locals); i++ {
+ locals[i] = transaction(uint64(i), 100000, key)
+ }
+ remotes := make([]*types.Transaction, 1000)
+ for i := 0; i < len(remotes); i++ {
+ remotes[i] = pricedTransaction(uint64(i), 100000, big.NewInt(2), remoteKey) // Higher gasprice
+ }
+ // Benchmark importing the transactions into the queue
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ b.StopTimer()
+ pool, _ := setupPool(false)
+ testAddBalance(pool, account, big.NewInt(100000000))
+ for _, local := range locals {
+ pool.addLocal(local)
+ }
+ b.StartTimer()
+ // Assign a high enough balance for testing
+ testAddBalance(pool, remoteAddr, big.NewInt(100000000))
+ for i := 0; i < len(remotes); i++ {
+ pool.addRemotes([]*types.Transaction{remotes[i]})
+ }
+ pool.Close()
+ }
+}
+
+// Benchmarks the speed of batch transaction insertion in case of multiple accounts.
+func BenchmarkMultiAccountBatchInsertNoAuctioneer(b *testing.B) {
+ // Generate a batch of transactions to enqueue into the pool
+ pool, _ := setupPool(false)
+ defer pool.Close()
+ b.ReportAllocs()
+ batches := make(types.Transactions, b.N)
+ for i := 0; i < b.N; i++ {
+ key, _ := crypto.GenerateKey()
+ account := crypto.PubkeyToAddress(key.PublicKey)
+ pool.currentState.AddBalance(account, uint256.NewInt(1000000), tracing.BalanceChangeUnspecified)
+ tx := transaction(uint64(0), 100000, key)
+ batches[i] = tx
+ }
+ // Benchmark importing the transactions into the queue
+ b.ResetTimer()
+ for _, tx := range batches {
+ pool.addRemotesSync([]*types.Transaction{tx})
+ }
+}
diff --git a/core/txpool/legacypool/legacypool_test.go b/core/txpool/legacypool/legacypool_test.go
index e35a42fed..a1395bebc 100644
--- a/core/txpool/legacypool/legacypool_test.go
+++ b/core/txpool/legacypool/legacypool_test.go
@@ -13,7 +13,6 @@
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
package legacypool
import (
@@ -160,16 +159,16 @@ func makeAddressReserver() txpool.AddressReserver {
}
}
-func setupPool() (*LegacyPool, *ecdsa.PrivateKey) {
- return setupPoolWithConfig(params.TestChainConfig)
+func setupPool(auctioneerEnabled bool) (*LegacyPool, *ecdsa.PrivateKey) {
+ return setupPoolWithConfig(params.TestChainConfig, auctioneerEnabled)
}
-func setupPoolWithConfig(config *params.ChainConfig) (*LegacyPool, *ecdsa.PrivateKey) {
+func setupPoolWithConfig(config *params.ChainConfig, auctioneerEnabled bool) (*LegacyPool, *ecdsa.PrivateKey) {
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
blockchain := newTestBlockChain(config, 10000000, statedb, new(event.Feed))
key, _ := crypto.GenerateKey()
- pool := New(testTxPoolConfig, blockchain)
+ pool := New(testTxPoolConfig, blockchain, auctioneerEnabled)
if err := pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()); err != nil {
panic(err)
}
@@ -285,7 +284,7 @@ func TestStateChangeDuringReset(t *testing.T) {
tx0 := transaction(0, 100000, key)
tx1 := transaction(1, 100000, key)
- pool := New(testTxPoolConfig, blockchain)
+ pool := New(testTxPoolConfig, blockchain, true)
pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
@@ -301,14 +300,30 @@ func TestStateChangeDuringReset(t *testing.T) {
t.Fatalf("Invalid nonce, want 2, got %d", nonce)
}
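+ // Subscribe to mempool clearance events so the reset below can be verified to clear the pool.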
+ mempoolClearedCh := make(chan core.NewMempoolCleared, 1)
+ mempoolClearedSub := pool.SubscribeMempoolClearance(mempoolClearedCh)
+ defer mempoolClearedSub.Unsubscribe()
+
// trigger state change in the background
trigger = true
<-pool.requestReset(nil, nil)
nonce = pool.Nonce(address)
- if nonce != 2 {
- t.Fatalf("Invalid nonce, want 2, got %d", nonce)
+ // mempool is cleared on reset, so Nonce falls back to the state nonce
+ if nonce != 0 {
+ t.Fatalf("Invalid nonce, want 0, got %d", nonce)
 }
+
+ select {
+ case mempoolClear := <-mempoolClearedCh:
+ if mempoolClear.NewHead != nil {
+ t.Fatalf("Expected mempool cleared head to be nil: %v", mempoolClear.NewHead)
+ }
+ case <-time.After(1 * time.Second):
+ t.Fatalf("Mempool cleared event not received")
+ case err := <-mempoolClearedSub.Err():
+ t.Fatalf("Mempool cleared subscription error: %v", err)
+ }
}
func testAddBalance(pool *LegacyPool, addr common.Address, amount *big.Int) {
@@ -326,7 +341,7 @@ func testSetNonce(pool *LegacyPool, addr common.Address, nonce uint64) {
func TestInvalidTransactions(t *testing.T) {
t.Parallel()
- pool, key := setupPool()
+ pool, key := setupPool(true)
defer pool.Close()
tx := transaction(0, 100, key)
@@ -364,7 +379,7 @@ func TestInvalidTransactions(t *testing.T) {
func TestQueue(t *testing.T) {
t.Parallel()
- pool, key := setupPool()
+ pool, key := setupPool(true)
defer pool.Close()
tx := transaction(0, 100, key)
@@ -395,7 +410,7 @@ func TestQueue(t *testing.T) {
func TestQueue2(t *testing.T) {
t.Parallel()
- pool, key := setupPool()
+ pool, key := setupPool(true)
defer pool.Close()
tx1 := transaction(0, 100, key)
@@ -421,7 +436,7 @@ func TestQueue2(t *testing.T) {
func TestNegativeValue(t *testing.T) {
t.Parallel()
- pool, key := setupPool()
+ pool, key := setupPool(true)
defer pool.Close()
tx, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(-1), 100, big.NewInt(1), nil), types.HomesteadSigner{}, key)
@@ -435,7 +450,7 @@ func TestNegativeValue(t *testing.T) {
func TestTipAboveFeeCap(t *testing.T) {
t.Parallel()
- pool, key := setupPoolWithConfig(eip1559Config)
+ pool, key := setupPoolWithConfig(eip1559Config, true)
defer pool.Close()
tx := dynamicFeeTx(0, 100, big.NewInt(1), big.NewInt(2), key)
@@ -448,7 +463,7 @@ func TestTipAboveFeeCap(t *testing.T) {
func TestVeryHighValues(t *testing.T) {
t.Parallel()
- pool, key := setupPoolWithConfig(eip1559Config)
+ pool, key := setupPoolWithConfig(eip1559Config, true)
defer pool.Close()
veryBigNumber := big.NewInt(1)
@@ -468,7 +483,7 @@ func TestVeryHighValues(t *testing.T) {
func TestChainFork(t *testing.T) {
t.Parallel()
- pool, key := setupPool()
+ pool, key := setupPool(true)
defer pool.Close()
addr := crypto.PubkeyToAddress(key.PublicKey)
@@ -497,7 +512,7 @@ func TestChainFork(t *testing.T) {
func TestRemoveTxSanity(t *testing.T) {
t.Parallel()
- pool, key := setupPool()
+ pool, key := setupPool(true)
defer pool.Close()
addr := crypto.PubkeyToAddress(key.PublicKey)
@@ -558,7 +573,7 @@ func TestRemoveTxSanity(t *testing.T) {
func TestDoubleNonce(t *testing.T) {
t.Parallel()
- pool, key := setupPool()
+ pool, key := setupPool(true)
defer pool.Close()
addr := crypto.PubkeyToAddress(key.PublicKey)
@@ -609,7 +624,7 @@ func TestDoubleNonce(t *testing.T) {
func TestMissingNonce(t *testing.T) {
t.Parallel()
- pool, key := setupPool()
+ pool, key := setupPool(true)
defer pool.Close()
addr := crypto.PubkeyToAddress(key.PublicKey)
@@ -633,7 +648,7 @@ func TestNonceRecovery(t *testing.T) {
t.Parallel()
const n = 10
- pool, key := setupPool()
+ pool, key := setupPool(true)
defer pool.Close()
addr := crypto.PubkeyToAddress(key.PublicKey)
@@ -659,7 +674,7 @@ func TestDropping(t *testing.T) {
t.Parallel()
// Create a test account and fund it
- pool, key := setupPool()
+ pool, key := setupPool(true)
defer pool.Close()
account := crypto.PubkeyToAddress(key.PublicKey)
@@ -674,21 +689,15 @@ func TestDropping(t *testing.T) {
tx11 = transaction(11, 200, key)
tx12 = transaction(12, 300, key)
)
- pool.all.Add(tx0, false)
- pool.priced.Put(tx0, false)
- pool.promoteTx(account, tx0.Hash(), tx0)
-
- pool.all.Add(tx1, false)
- pool.priced.Put(tx1, false)
- pool.promoteTx(account, tx1.Hash(), tx1)
- pool.all.Add(tx2, false)
- pool.priced.Put(tx2, false)
- pool.promoteTx(account, tx2.Hash(), tx2)
+ pool.add(tx0, false)
+ pool.add(tx1, false)
+ pool.add(tx2, false)
+ pool.add(tx10, false)
+ pool.add(tx11, false)
+ pool.add(tx12, false)
- pool.enqueueTx(tx10.Hash(), tx10, false, true)
- pool.enqueueTx(tx11.Hash(), tx11, false, true)
- pool.enqueueTx(tx12.Hash(), tx12, false, true)
+ pool.promoteExecutables([]common.Address{account})
// Check that pre and post validations leave the pool as is
if pool.pending[account].Len() != 3 {
@@ -700,59 +709,34 @@ func TestDropping(t *testing.T) {
if pool.all.Count() != 6 {
t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 6)
}
- <-pool.requestReset(nil, nil)
- if pool.pending[account].Len() != 3 {
- t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 3)
- }
- if pool.queue[account].Len() != 3 {
- t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 3)
- }
- if pool.all.Count() != 6 {
- t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 6)
- }
- // Reduce the balance of the account, and check that invalidated transactions are dropped
- testAddBalance(pool, account, big.NewInt(-650))
- <-pool.requestReset(nil, nil)
- if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok {
- t.Errorf("funded pending transaction missing: %v", tx0)
- }
- if _, ok := pool.pending[account].txs.items[tx1.Nonce()]; !ok {
- t.Errorf("funded pending transaction missing: %v", tx0)
- }
- if _, ok := pool.pending[account].txs.items[tx2.Nonce()]; ok {
- t.Errorf("out-of-fund pending transaction present: %v", tx1)
- }
- if _, ok := pool.queue[account].txs.items[tx10.Nonce()]; !ok {
- t.Errorf("funded queued transaction missing: %v", tx10)
- }
- if _, ok := pool.queue[account].txs.items[tx11.Nonce()]; !ok {
- t.Errorf("funded queued transaction missing: %v", tx10)
- }
- if _, ok := pool.queue[account].txs.items[tx12.Nonce()]; ok {
- t.Errorf("out-of-fund queued transaction present: %v", tx11)
- }
- if pool.all.Count() != 4 {
- t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 4)
- }
- // Reduce the block gas limit, check that invalidated transactions are dropped
- pool.chain.(*testBlockChain).gasLimit.Store(100)
+ mempoolClearedCh := make(chan core.NewMempoolCleared, 1)
+ mempoolClearedSub := pool.SubscribeMempoolClearance(mempoolClearedCh)
+ defer mempoolClearedSub.Unsubscribe()
+
<-pool.requestReset(nil, nil)
- if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok {
- t.Errorf("funded pending transaction missing: %v", tx0)
- }
- if _, ok := pool.pending[account].txs.items[tx1.Nonce()]; ok {
- t.Errorf("over-gased pending transaction present: %v", tx1)
+ pending, queued := pool.Stats()
+
+ if pending != 0 {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0)
}
- if _, ok := pool.queue[account].txs.items[tx10.Nonce()]; !ok {
- t.Errorf("funded queued transaction missing: %v", tx10)
+ if queued != 0 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
}
- if _, ok := pool.queue[account].txs.items[tx11.Nonce()]; ok {
- t.Errorf("over-gased queued transaction present: %v", tx11)
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
}
- if pool.all.Count() != 2 {
- t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 2)
+
+ select {
+ case mempoolClear := <-mempoolClearedCh:
+ if mempoolClear.NewHead != nil {
+ t.Fatalf("Expected mempool cleared head to be nil: %v", mempoolClear.NewHead)
+ }
+ case <-time.After(1 * time.Second):
+ t.Fatalf("Mempool cleared event not received")
+ case err := <-mempoolClearedSub.Err():
+ t.Fatalf("Mempool cleared subscription error: %v", err)
}
}
@@ -766,7 +750,7 @@ func TestPostponing(t *testing.T) {
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
- pool := New(testTxPoolConfig, blockchain)
+ pool := New(testTxPoolConfig, blockchain, true)
pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
@@ -808,65 +792,35 @@ func TestPostponing(t *testing.T) {
if pool.all.Count() != len(txs) {
t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs))
}
- <-pool.requestReset(nil, nil)
- if pending := pool.pending[accs[0]].Len() + pool.pending[accs[1]].Len(); pending != len(txs) {
- t.Errorf("pending transaction mismatch: have %d, want %d", pending, len(txs))
- }
- if len(pool.queue) != 0 {
- t.Errorf("queued accounts mismatch: have %d, want %d", len(pool.queue), 0)
- }
- if pool.all.Count() != len(txs) {
- t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs))
- }
- // Reduce the balance of the account, and check that transactions are reorganised
- for _, addr := range accs {
- testAddBalance(pool, addr, big.NewInt(-1))
- }
+
+ mempoolClearedCh := make(chan core.NewMempoolCleared, 1)
+ mempoolClearedSub := pool.SubscribeMempoolClearance(mempoolClearedCh)
+ defer mempoolClearedSub.Unsubscribe()
+
<-pool.requestReset(nil, nil)
- // The first account's first transaction remains valid, check that subsequent
- // ones are either filtered out, or queued up for later.
- if _, ok := pool.pending[accs[0]].txs.items[txs[0].Nonce()]; !ok {
- t.Errorf("tx %d: valid and funded transaction missing from pending pool: %v", 0, txs[0])
- }
- if _, ok := pool.queue[accs[0]].txs.items[txs[0].Nonce()]; ok {
- t.Errorf("tx %d: valid and funded transaction present in future queue: %v", 0, txs[0])
+ pending, queued := pool.Stats()
+
+ if pending != 0 {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0)
}
- for i, tx := range txs[1:100] {
- if i%2 == 1 {
- if _, ok := pool.pending[accs[0]].txs.items[tx.Nonce()]; ok {
- t.Errorf("tx %d: valid but future transaction present in pending pool: %v", i+1, tx)
- }
- if _, ok := pool.queue[accs[0]].txs.items[tx.Nonce()]; !ok {
- t.Errorf("tx %d: valid but future transaction missing from future queue: %v", i+1, tx)
- }
- } else {
- if _, ok := pool.pending[accs[0]].txs.items[tx.Nonce()]; ok {
- t.Errorf("tx %d: out-of-fund transaction present in pending pool: %v", i+1, tx)
- }
- if _, ok := pool.queue[accs[0]].txs.items[tx.Nonce()]; ok {
- t.Errorf("tx %d: out-of-fund transaction present in future queue: %v", i+1, tx)
- }
- }
+ if queued != 0 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
}
- // The second account's first transaction got invalid, check that all transactions
- // are either filtered out, or queued up for later.
- if pool.pending[accs[1]] != nil {
- t.Errorf("invalidated account still has pending transactions")
+
+ if err := validatePoolInternals(pool); err != nil {
+ t.Fatalf("pool internal state corrupted: %v", err)
}
- for i, tx := range txs[100:] {
- if i%2 == 1 {
- if _, ok := pool.queue[accs[1]].txs.items[tx.Nonce()]; !ok {
- t.Errorf("tx %d: valid but future transaction missing from future queue: %v", 100+i, tx)
- }
- } else {
- if _, ok := pool.queue[accs[1]].txs.items[tx.Nonce()]; ok {
- t.Errorf("tx %d: out-of-fund transaction present in future queue: %v", 100+i, tx)
- }
+
+ select {
+ case mempoolClear := <-mempoolClearedCh:
+ if mempoolClear.NewHead != nil {
+ t.Fatalf("Expected mempool cleared head to be nil: %v", mempoolClear.NewHead)
}
- }
- if pool.all.Count() != len(txs)/2 {
- t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs)/2)
+ case <-time.After(1 * time.Second):
+ t.Fatalf("Mempool cleared event not received")
+ case err := <-mempoolClearedSub.Err():
+ t.Fatalf("Mempool cleared subscription error: %v", err)
}
}
@@ -877,7 +831,7 @@ func TestGapFilling(t *testing.T) {
t.Parallel()
// Create a test account and fund it
- pool, key := setupPool()
+ pool, key := setupPool(true)
defer pool.Close()
account := crypto.PubkeyToAddress(key.PublicKey)
@@ -931,7 +885,7 @@ func TestQueueAccountLimiting(t *testing.T) {
t.Parallel()
// Create a test account and fund it
- pool, key := setupPool()
+ pool, key := setupPool(true)
defer pool.Close()
account := crypto.PubkeyToAddress(key.PublicKey)
@@ -960,96 +914,6 @@ func TestQueueAccountLimiting(t *testing.T) {
}
}
-// Tests that if the transaction count belonging to multiple accounts go above
-// some threshold, the higher transactions are dropped to prevent DOS attacks.
-//
-// This logic should not hold for local transactions, unless the local tracking
-// mechanism is disabled.
-func TestQueueGlobalLimiting(t *testing.T) {
- testQueueGlobalLimiting(t, false)
-}
-func TestQueueGlobalLimitingNoLocals(t *testing.T) {
- testQueueGlobalLimiting(t, true)
-}
-
-func testQueueGlobalLimiting(t *testing.T, nolocals bool) {
- t.Parallel()
-
- // Create the pool to test the limit enforcement with
- statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
- blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
-
- config := testTxPoolConfig
- config.NoLocals = nolocals
- config.GlobalQueue = config.AccountQueue*3 - 1 // reduce the queue limits to shorten test time (-1 to make it non divisible)
-
- pool := New(config, blockchain)
- pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
- defer pool.Close()
-
- // Create a number of test accounts and fund them (last one will be the local)
- keys := make([]*ecdsa.PrivateKey, 5)
- for i := 0; i < len(keys); i++ {
- keys[i], _ = crypto.GenerateKey()
- testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000))
- }
- local := keys[len(keys)-1]
-
- // Generate and queue a batch of transactions
- nonces := make(map[common.Address]uint64)
-
- txs := make(types.Transactions, 0, 3*config.GlobalQueue)
- for len(txs) < cap(txs) {
- key := keys[rand.Intn(len(keys)-1)] // skip adding transactions with the local account
- addr := crypto.PubkeyToAddress(key.PublicKey)
-
- txs = append(txs, transaction(nonces[addr]+1, 100000, key))
- nonces[addr]++
- }
- // Import the batch and verify that limits have been enforced
- pool.addRemotesSync(txs)
-
- queued := 0
- for addr, list := range pool.queue {
- if list.Len() > int(config.AccountQueue) {
- t.Errorf("addr %x: queued accounts overflown allowance: %d > %d", addr, list.Len(), config.AccountQueue)
- }
- queued += list.Len()
- }
- if queued > int(config.GlobalQueue) {
- t.Fatalf("total transactions overflow allowance: %d > %d", queued, config.GlobalQueue)
- }
- // Generate a batch of transactions from the local account and import them
- txs = txs[:0]
- for i := uint64(0); i < 3*config.GlobalQueue; i++ {
- txs = append(txs, transaction(i+1, 100000, local))
- }
- pool.addLocals(txs)
-
- // If locals are disabled, the previous eviction algorithm should apply here too
- if nolocals {
- queued := 0
- for addr, list := range pool.queue {
- if list.Len() > int(config.AccountQueue) {
- t.Errorf("addr %x: queued accounts overflown allowance: %d > %d", addr, list.Len(), config.AccountQueue)
- }
- queued += list.Len()
- }
- if queued > int(config.GlobalQueue) {
- t.Fatalf("total transactions overflow allowance: %d > %d", queued, config.GlobalQueue)
- }
- } else {
- // Local exemptions are enabled, make sure the local account owned the queue
- if len(pool.queue) != 1 {
- t.Errorf("multiple accounts in queue: have %v, want %v", len(pool.queue), 1)
- }
- // Also ensure no local transactions are ever dropped, even if above global limits
- if queued := pool.queue[crypto.PubkeyToAddress(local.PublicKey)].Len(); uint64(queued) != 3*config.GlobalQueue {
- t.Fatalf("local account queued transaction count mismatch: have %v, want %v", queued, 3*config.GlobalQueue)
- }
- }
-}
-
// Tests that if an account remains idle for a prolonged amount of time, any
// non-executable transactions queued up are dropped to prevent wasting resources
// on shuffling them around.
@@ -1059,6 +923,7 @@ func testQueueGlobalLimiting(t *testing.T, nolocals bool) {
func TestQueueTimeLimiting(t *testing.T) {
testQueueTimeLimiting(t, false)
}
+
func TestQueueTimeLimitingNoLocals(t *testing.T) {
testQueueTimeLimiting(t, true)
}
@@ -1076,7 +941,7 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) {
config.Lifetime = time.Second
config.NoLocals = nolocals
- pool := New(config, blockchain)
+ pool := New(config, blockchain, true)
pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
@@ -1143,6 +1008,11 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) {
// remove current transactions and increase nonce to prepare for a reset and cleanup
statedb.SetNonce(crypto.PubkeyToAddress(remote.PublicKey), 2)
statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 2)
+
+ mempoolClearedCh := make(chan core.NewMempoolCleared, 1)
+ mempoolClearedSub := pool.SubscribeMempoolClearance(mempoolClearedCh)
+ defer mempoolClearedSub.Unsubscribe()
+
<-pool.requestReset(nil, nil)
// make sure queue, pending are cleared
@@ -1157,53 +1027,15 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) {
t.Fatalf("pool internal state corrupted: %v", err)
}
- // Queue gapped transactions
- if err := pool.addLocal(pricedTransaction(4, 100000, big.NewInt(1), local)); err != nil {
- t.Fatalf("failed to add remote transaction: %v", err)
- }
- if err := pool.addRemoteSync(pricedTransaction(4, 100000, big.NewInt(1), remote)); err != nil {
- t.Fatalf("failed to add remote transaction: %v", err)
- }
- time.Sleep(5 * evictionInterval) // A half lifetime pass
-
- // Queue executable transactions, the life cycle should be restarted.
- if err := pool.addLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil {
- t.Fatalf("failed to add remote transaction: %v", err)
- }
- if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(1), remote)); err != nil {
- t.Fatalf("failed to add remote transaction: %v", err)
- }
- time.Sleep(6 * evictionInterval)
-
- // All gapped transactions shouldn't be kicked out
- pending, queued = pool.Stats()
- if pending != 2 {
- t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
- }
- if queued != 2 {
- t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2)
- }
- if err := validatePoolInternals(pool); err != nil {
- t.Fatalf("pool internal state corrupted: %v", err)
- }
-
- // The whole life time pass after last promotion, kick out stale transactions
- time.Sleep(2 * config.Lifetime)
- pending, queued = pool.Stats()
- if pending != 2 {
- t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
- }
- if nolocals {
- if queued != 0 {
- t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
- }
- } else {
- if queued != 1 {
- t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1)
+ select {
+ case mempoolClear := <-mempoolClearedCh:
+ if mempoolClear.NewHead != nil {
+ t.Fatalf("Expected mempool cleared head to be nil: %v", mempoolClear.NewHead)
}
- }
- if err := validatePoolInternals(pool); err != nil {
- t.Fatalf("pool internal state corrupted: %v", err)
+ case <-time.After(1 * time.Second):
+ t.Fatalf("Mempool cleared event not received")
+ case err := <-mempoolClearedSub.Err():
+ t.Fatalf("Mempool cleared subscription error: %v", err)
}
}
@@ -1214,7 +1046,7 @@ func TestPendingLimiting(t *testing.T) {
t.Parallel()
// Create a test account and fund it
- pool, key := setupPool()
+ pool, key := setupPool(true)
defer pool.Close()
account := crypto.PubkeyToAddress(key.PublicKey)
@@ -1261,7 +1093,7 @@ func TestPendingGlobalLimiting(t *testing.T) {
config := testTxPoolConfig
config.GlobalSlots = config.AccountSlots * 10
- pool := New(config, blockchain)
+ pool := New(config, blockchain, true)
pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
@@ -1304,7 +1136,7 @@ func TestAllowedTxSize(t *testing.T) {
t.Parallel()
// Create a test account and fund it
- pool, key := setupPool()
+ pool, key := setupPool(true)
defer pool.Close()
account := crypto.PubkeyToAddress(key.PublicKey)
@@ -1365,7 +1197,7 @@ func TestCapClearsFromAll(t *testing.T) {
config.AccountQueue = 2
config.GlobalSlots = 8
- pool := New(config, blockchain)
+ pool := New(config, blockchain, true)
pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
@@ -1398,7 +1230,7 @@ func TestPendingMinimumAllowance(t *testing.T) {
config := testTxPoolConfig
config.GlobalSlots = 1
- pool := New(config, blockchain)
+ pool := New(config, blockchain, true)
pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
@@ -1444,7 +1276,7 @@ func TestRepricing(t *testing.T) {
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
- pool := New(testTxPoolConfig, blockchain)
+ pool := New(testTxPoolConfig, blockchain, true)
pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
@@ -1566,7 +1398,7 @@ func TestMinGasPriceEnforced(t *testing.T) {
txPoolConfig := DefaultConfig
txPoolConfig.NoLocals = true
- pool := New(txPoolConfig, blockchain)
+ pool := New(txPoolConfig, blockchain, true)
pool.Init(txPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
@@ -1610,7 +1442,7 @@ func TestRepricingDynamicFee(t *testing.T) {
t.Parallel()
// Create the pool to test the pricing enforcement with
- pool, _ := setupPoolWithConfig(eip1559Config)
+ pool, _ := setupPoolWithConfig(eip1559Config, true)
defer pool.Close()
// Keep track of transaction events to ensure all executables get announced
@@ -1737,7 +1569,7 @@ func TestRepricingKeepsLocals(t *testing.T) {
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed))
- pool := New(testTxPoolConfig, blockchain)
+ pool := New(testTxPoolConfig, blockchain, true)
pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
@@ -1815,7 +1647,7 @@ func TestUnderpricing(t *testing.T) {
config.GlobalSlots = 2
config.GlobalQueue = 2
- pool := New(config, blockchain)
+ pool := New(config, blockchain, true)
pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
@@ -1930,7 +1762,7 @@ func TestStableUnderpricing(t *testing.T) {
config.GlobalSlots = 128
config.GlobalQueue = 0
- pool := New(config, blockchain)
+ pool := New(config, blockchain, true)
pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
@@ -1992,7 +1824,7 @@ func TestStableUnderpricing(t *testing.T) {
func TestUnderpricingDynamicFee(t *testing.T) {
t.Parallel()
- pool, _ := setupPoolWithConfig(eip1559Config)
+ pool, _ := setupPoolWithConfig(eip1559Config, true)
defer pool.Close()
pool.config.GlobalSlots = 2
@@ -2099,7 +1931,7 @@ func TestUnderpricingDynamicFee(t *testing.T) {
func TestDualHeapEviction(t *testing.T) {
t.Parallel()
- pool, _ := setupPoolWithConfig(eip1559Config)
+ pool, _ := setupPoolWithConfig(eip1559Config, true)
defer pool.Close()
pool.config.GlobalSlots = 10
@@ -2159,7 +1991,7 @@ func TestDeduplication(t *testing.T) {
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
- pool := New(testTxPoolConfig, blockchain)
+ pool := New(testTxPoolConfig, blockchain, true)
pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
@@ -2226,7 +2058,7 @@ func TestReplacement(t *testing.T) {
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
- pool := New(testTxPoolConfig, blockchain)
+ pool := New(testTxPoolConfig, blockchain, true)
pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
@@ -2304,7 +2136,7 @@ func TestReplacementDynamicFee(t *testing.T) {
t.Parallel()
// Create the pool to test the pricing enforcement with
- pool, key := setupPoolWithConfig(eip1559Config)
+ pool, key := setupPoolWithConfig(eip1559Config, true)
defer pool.Close()
testAddBalance(pool, crypto.PubkeyToAddress(key.PublicKey), big.NewInt(1000000000))
@@ -2437,7 +2269,7 @@ func testJournaling(t *testing.T, nolocals bool) {
config.Journal = journal
config.Rejournal = time.Second
- pool := New(config, blockchain)
+ pool := New(config, blockchain, true)
pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
// Create two test accounts to ensure remotes expire but locals do not
@@ -2475,7 +2307,7 @@ func testJournaling(t *testing.T, nolocals bool) {
statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1)
blockchain = newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
- pool = New(config, blockchain)
+ pool = New(config, blockchain, true)
pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
pending, queued = pool.Stats()
@@ -2496,31 +2328,43 @@ func testJournaling(t *testing.T, nolocals bool) {
}
// Bump the nonce temporarily and ensure the newly invalidated transaction is removed
statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 2)
+
+ mempoolClearedCh := make(chan core.NewMempoolCleared, 1)
+ mempoolClearedSub := pool.SubscribeMempoolClearance(mempoolClearedCh)
+ defer mempoolClearedSub.Unsubscribe()
+
<-pool.requestReset(nil, nil)
time.Sleep(2 * config.Rejournal)
pool.Close()
statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1)
blockchain = newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
- pool = New(config, blockchain)
+ pool = New(config, blockchain, true)
pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
+ // tx mempool is cleared out completely after a reset
pending, queued = pool.Stats()
if pending != 0 {
t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0)
}
- if nolocals {
- if queued != 0 {
- t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
- }
- } else {
- if queued != 1 {
- t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1)
- }
+ if queued != 0 {
+ t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
}
if err := validatePoolInternals(pool); err != nil {
t.Fatalf("pool internal state corrupted: %v", err)
}
+
+ select {
+ case mempoolClear := <-mempoolClearedCh:
+ if mempoolClear.NewHead != nil {
+ t.Fatalf("mempool clear event should not have a new head")
+ }
+ case <-time.After(1 * time.Second):
+ t.Fatalf("mempool clear event not received")
+ case err := <-mempoolClearedSub.Err():
+ t.Fatalf("mempool clear event subscription error: %v", err)
+ }
+
pool.Close()
}
@@ -2533,7 +2377,7 @@ func TestStatusCheck(t *testing.T) {
statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))
- pool := New(testTxPoolConfig, blockchain)
+ pool := New(testTxPoolConfig, blockchain, true)
pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
defer pool.Close()
@@ -2605,7 +2449,7 @@ func BenchmarkPendingDemotion10000(b *testing.B) { benchmarkPendingDemotion(b, 1
func benchmarkPendingDemotion(b *testing.B, size int) {
// Add a batch of transactions to a pool one by one
- pool, key := setupPool()
+ pool, key := setupPool(true)
defer pool.Close()
account := crypto.PubkeyToAddress(key.PublicKey)
@@ -2630,7 +2474,7 @@ func BenchmarkFuturePromotion10000(b *testing.B) { benchmarkFuturePromotion(b, 1
func benchmarkFuturePromotion(b *testing.B, size int) {
// Add a batch of transactions to a pool one by one
- pool, key := setupPool()
+ pool, key := setupPool(true)
defer pool.Close()
account := crypto.PubkeyToAddress(key.PublicKey)
@@ -2658,7 +2502,7 @@ func BenchmarkBatchLocalInsert10000(b *testing.B) { benchmarkBatchInsert(b, 1000
func benchmarkBatchInsert(b *testing.B, size int, local bool) {
// Generate a batch of transactions to enqueue into the pool
- pool, key := setupPool()
+ pool, key := setupPool(true)
defer pool.Close()
account := crypto.PubkeyToAddress(key.PublicKey)
@@ -2702,7 +2546,7 @@ func BenchmarkInsertRemoteWithAllLocals(b *testing.B) {
b.ResetTimer()
for i := 0; i < b.N; i++ {
b.StopTimer()
- pool, _ := setupPool()
+ pool, _ := setupPool(true)
testAddBalance(pool, account, big.NewInt(100000000))
for _, local := range locals {
pool.addLocal(local)
@@ -2720,7 +2564,7 @@ func BenchmarkInsertRemoteWithAllLocals(b *testing.B) {
// Benchmarks the speed of batch transaction insertion in case of multiple accounts.
func BenchmarkMultiAccountBatchInsert(b *testing.B) {
// Generate a batch of transactions to enqueue into the pool
- pool, _ := setupPool()
+ pool, _ := setupPool(true)
defer pool.Close()
b.ReportAllocs()
batches := make(types.Transactions, b.N)
diff --git a/core/txpool/legacypool/list.go b/core/txpool/legacypool/list.go
index b749db44d..6b1a48b15 100644
--- a/core/txpool/legacypool/list.go
+++ b/core/txpool/legacypool/list.go
@@ -396,6 +396,35 @@ func (l *list) Filter(costLimit *uint256.Int, gasLimit uint64) (types.Transactio
return removed, invalids
}
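+// ClearList removes every transaction from the list, returning the removed
+// transactions together with any that were invalidated as a result.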
+func (l *list) ClearList() (types.Transactions, types.Transactions) {
+ // Filter out all the transactions
+ removed := l.txs.Filter(func(tx *types.Transaction) bool {
+ return true
+ })
+
+ if len(removed) == 0 {
+ return nil, nil
+ }
+
+ // TODO: we might not need the code below
+ var invalids types.Transactions
+ // If the list was strict, filter anything above the lowest nonce
+ if l.strict {
+ lowest := uint64(math.MaxUint64)
+ for _, tx := range removed {
+ if nonce := tx.Nonce(); lowest > nonce {
+ lowest = nonce
+ }
+ }
+ invalids = l.txs.filter(func(tx *types.Transaction) bool { return tx.Nonce() > lowest })
+ }
+ // Reset total cost
+ l.subTotalCost(removed)
+ l.subTotalCost(invalids)
+ l.txs.reheap()
+ return removed, invalids
+}
+
// Cap places a hard limit on the number of items, returning all transactions
// exceeding that limit.
func (l *list) Cap(threshold int) types.Transactions {
diff --git a/core/txpool/legacypool/list_test.go b/core/txpool/legacypool/list_test.go
index 8587c66f7..b46574867 100644
--- a/core/txpool/legacypool/list_test.go
+++ b/core/txpool/legacypool/list_test.go
@@ -13,7 +13,6 @@
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
-
package legacypool
import (
diff --git a/core/txpool/subpool.go b/core/txpool/subpool.go
index be59ec861..0f640b493 100644
--- a/core/txpool/subpool.go
+++ b/core/txpool/subpool.go
@@ -140,6 +140,9 @@ type SubPool interface {
// or also for reorged out ones.
SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription
+ // SubscribeMempoolClearance subscribes to new mempool clearing events.
+ SubscribeMempoolClearance(ch chan<- core.NewMempoolCleared) event.Subscription
+
// Nonce returns the next nonce of an account, with all transactions executable
// by the pool already applied on top.
Nonce(addr common.Address) uint64
diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go
index 7767e205b..432433a96 100644
--- a/core/txpool/txpool.go
+++ b/core/txpool/txpool.go
@@ -78,22 +78,25 @@ type TxPool struct {
term chan struct{} // Termination channel to detect a closed pool
sync chan chan error // Testing / simulator channel to block until internal reset is done
+
+ auctioneerEnabled bool // when true, pool resets are driven by optimistic head events instead of chain head events
}
// New creates a new transaction pool to gather, sort and filter inbound
// transactions from the network.
-func New(gasTip uint64, chain BlockChain, subpools []SubPool) (*TxPool, error) {
+func New(gasTip uint64, chain BlockChain, subpools []SubPool, auctioneerEnabled bool) (*TxPool, error) {
// Retrieve the current head so that all subpools and this main coordinator
// pool will have the same starting state, even if the chain moves forward
// during initialization.
head := chain.CurrentBlock()
pool := &TxPool{
- subpools: subpools,
- reservations: make(map[common.Address]SubPool),
- quit: make(chan chan error),
- term: make(chan struct{}),
- sync: make(chan chan error),
+ subpools: subpools,
+ reservations: make(map[common.Address]SubPool),
+ quit: make(chan chan error),
+ term: make(chan struct{}),
+ sync: make(chan chan error),
+ auctioneerEnabled: auctioneerEnabled,
}
for i, subpool := range subpools {
if err := subpool.Init(gasTip, head, pool.reserver(i, subpool)); err != nil {
@@ -192,6 +195,12 @@ func (p *TxPool) loop(head *types.Header, chain BlockChain) {
)
defer newOptimisticHeadSub.Unsubscribe()
+ var (
+ newHeadCh = make(chan core.ChainHeadEvent)
+ newHeadSub = chain.SubscribeChainHeadEvent(newHeadCh)
+ )
+ defer newHeadSub.Unsubscribe()
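+ // Whether resets are driven by optimistic head events or canonical chain
+ // head events depends on auctioneerEnabled (see the select below).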
+
// Track the previous and current head to feed to an idle reset
var (
oldHead = head
@@ -245,8 +254,15 @@ func (p *TxPool) loop(head *types.Header, chain BlockChain) {
// Wait for the next chain head event or a previous reset finish
select {
case event := <-newOptimisticHeadCh:
- // Chain moved forward, store the head for later consumption
- newHead = event.Block.Header()
+ if p.auctioneerEnabled {
+ // Chain moved forward, store the head for later consumption
+ newHead = event.Block.Header()
+ }
+ case event := <-newHeadCh:
+ if !p.auctioneerEnabled {
+ // Chain moved forward, store the head for later consumption
+ newHead = event.Block.Header()
+ }
case head := <-resetDone:
// Previous reset finished, update the old head and allow a new reset
@@ -431,6 +447,18 @@ func (p *TxPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool)
return p.subs.Track(event.JoinSubscriptions(subs...))
}
+// SubscribeMempoolClearance registers a subscription for new mempool clearance events
+func (p *TxPool) SubscribeMempoolClearance(ch chan<- core.NewMempoolCleared) event.Subscription {
+ subs := []event.Subscription{}
+ for _, subpool := range p.subpools {
+ sub := subpool.SubscribeMempoolClearance(ch)
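+ // Skip subpools that do not provide a clearance subscription.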
+ if sub != nil {
+ subs = append(subs, sub)
+ }
+ }
+ return p.subs.Track(event.JoinSubscriptions(subs...))
+}
+
// Nonce returns the next nonce of an account, with all transactions executable
// by the pool already applied on top.
func (p *TxPool) Nonce(addr common.Address) uint64 {
diff --git a/eth/api_backend.go b/eth/api_backend.go
index 304904365..2b5c820c4 100644
--- a/eth/api_backend.go
+++ b/eth/api_backend.go
@@ -143,6 +143,13 @@ func (b *EthAPIBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumbe
}
return b.eth.blockchain.GetBlock(header.Hash(), header.Number.Uint64()), nil
}
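+ // Serve the current optimistic block when it is explicitly requested.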
+ if number == rpc.OptimisticBlockNumber {
+ header := b.eth.blockchain.CurrentOptimisticBlock()
+ if header == nil {
+ return nil, errors.New("optimistic block not found")
+ }
+ return b.eth.blockchain.GetBlock(header.Hash(), header.Number.Uint64()), nil
+ }
return b.eth.blockchain.GetBlockByNumber(uint64(number)), nil
}
diff --git a/eth/backend.go b/eth/backend.go
index bea001c68..b24c4bf46 100644
--- a/eth/backend.go
+++ b/eth/backend.go
@@ -235,9 +235,9 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
if config.TxPool.Journal != "" {
config.TxPool.Journal = stack.ResolvePath(config.TxPool.Journal)
}
- legacyPool := legacypool.New(config.TxPool, eth.blockchain)
+ legacyPool := legacypool.New(config.TxPool, eth.blockchain, stack.AuctioneerEnabled())
- eth.txPool, err = txpool.New(config.TxPool.PriceLimit, eth.blockchain, []txpool.SubPool{legacyPool, blobPool})
+ eth.txPool, err = txpool.New(config.TxPool.PriceLimit, eth.blockchain, []txpool.SubPool{legacyPool, blobPool}, stack.AuctioneerEnabled())
if err != nil {
return nil, err
}
diff --git a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go
index 934dadc9a..84cac22f8 100644
--- a/eth/protocols/eth/handler_test.go
+++ b/eth/protocols/eth/handler_test.go
@@ -116,8 +116,8 @@ func newTestBackendWithGenerator(blocks int, shanghai bool, generator func(int,
txconfig := legacypool.DefaultConfig
txconfig.Journal = "" // Don't litter the disk with test journals
- pool := legacypool.New(txconfig, chain)
- txpool, _ := txpool.New(txconfig.PriceLimit, chain, []txpool.SubPool{pool})
+ pool := legacypool.New(txconfig, chain, true)
+ txpool, _ := txpool.New(txconfig.PriceLimit, chain, []txpool.SubPool{pool}, true)
return &testBackend{
db: db,
diff --git a/genesis.json b/genesis.json
index d4ed69eac..e65d980fb 100644
--- a/genesis.json
+++ b/genesis.json
@@ -40,6 +40,9 @@
}
}
],
+ "astriaAuctioneerAddresses": {
+ "1": ""
+ },
"astriaFeeCollectors": {
"1": "0xaC21B97d35Bf75A7dAb16f35b111a50e78A72F30"
},
diff --git a/go.mod b/go.mod
index 1055cd47c..55ab2532b 100644
--- a/go.mod
+++ b/go.mod
@@ -3,10 +3,10 @@ module github.com/ethereum/go-ethereum
go 1.21
require (
- buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-20241017141511-7e4bcc0ebba5.1
- buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.35.1-20241017141511-7e4bcc0ebba5.1
- buf.build/gen/go/astria/primitives/protocolbuffers/go v1.35.1-20240911152449-eeebd3decdce.1
- buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.35.1-20241017141511-71aab1871615.1
+ buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-42cbdd5aad4c.2
+ buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.36.2-00000000000000-42cbdd5aad4c.1
+ buf.build/gen/go/astria/primitives/protocolbuffers/go v1.36.2-00000000000000-9a039a6ed8db.1
+ buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.36.2-00000000000000-e54e1c9ad405.1
github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0
github.com/Microsoft/go-winio v0.6.1
github.com/VictoriaMetrics/fastcache v1.12.1
@@ -15,7 +15,7 @@ require (
github.com/aws/aws-sdk-go-v2/credentials v1.13.43
github.com/aws/aws-sdk-go-v2/service/route53 v1.30.2
github.com/btcsuite/btcd/btcec/v2 v2.2.0
- github.com/btcsuite/btcd/btcutil v1.1.5
+ github.com/btcsuite/btcd/btcutil v1.1.6
github.com/cespare/cp v0.1.0
github.com/cloudflare/cloudflare-go v0.79.0
github.com/cockroachdb/pebble v1.1.0
@@ -79,12 +79,13 @@ require (
golang.org/x/time v0.5.0
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d
google.golang.org/grpc v1.64.1
- google.golang.org/protobuf v1.35.1
+ google.golang.org/protobuf v1.36.2
gopkg.in/natefinch/lumberjack.v2 v2.0.0
gopkg.in/yaml.v3 v3.0.1
)
require (
+ filippo.io/edwards25519 v1.0.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect
github.com/DataDog/zstd v1.4.5 // indirect
@@ -100,6 +101,8 @@ require (
github.com/aws/smithy-go v1.15.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.10.0 // indirect
+ github.com/btcsuite/btcd v0.24.2 // indirect
+ github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cockroachdb/errors v1.11.1 // indirect
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect
@@ -120,6 +123,7 @@ require (
github.com/google/pprof v0.0.0-20230207041349-798e818bf904 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-retryablehttp v0.7.4 // indirect
+ github.com/hdevalence/ed25519consensus v0.2.0 // indirect
github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/klauspost/compress v1.15.15 // indirect
diff --git a/go.sum b/go.sum
index 83a47af38..992acbb76 100644
--- a/go.sum
+++ b/go.sum
@@ -1,11 +1,41 @@
+buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-1f40f333891d.2 h1:9rMXnvPR2EX56tMIqbhOK+DvqKjWb++p5s1/bookIl8=
+buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-1f40f333891d.2/go.mod h1:hdCXwnxpMeoqXK5LCQ6gLMcmMLUDX8T9+hbxYrtj+wQ=
+buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-42cbdd5aad4c.2 h1:W0lzc0sAzlzyKWWXLcuGW+GDsB9VRT+P/4ffP/hwJ4U=
+buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-42cbdd5aad4c.2/go.mod h1:jXiXYlSxLrhrUCAIuLq4cVcfXydbsz9mRVftWx/8eGs=
+buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-cc31a327d543.1 h1:wOry49zAbse0G4mt2tFTwa4P2AUMuYCR/0mYcPrpcbs=
+buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-cc31a327d543.1/go.mod h1:+pVCkEpJNp2JtooS8NiydT7bO9+hu11XUZ5Z47DPtXo=
+buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-e09c7fd3fe26.1 h1:gS4erruX5XeMN0MZ7xe4JmEIR3uCWrvzG5HGV725WiI=
+buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-e09c7fd3fe26.1/go.mod h1:oXNLXPUVa006hXUuEk+z5isisNlEbrm0yS+XJeMj6u4=
buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-20241017141511-7e4bcc0ebba5.1 h1:v7QnrDjNmG7I/0aqZdtlP3cBPQGd62w4AYVF8TfAcHM=
buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-20241017141511-7e4bcc0ebba5.1/go.mod h1:T5EsLvEE5UMk62gVSwNY/7XlxknAP3sL8tYRsU68b4s=
+buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.35.1-00000000000000-cc31a327d543.1 h1:VkPk2LvyNK8NF9WmAnodrwgQZ3JiYAHFEmPKXUtlX4E=
+buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.35.1-00000000000000-cc31a327d543.1/go.mod h1:xzRLiRun3wTzhd+oBg9VkXi/c4PhjBjj73+2vSMH5eM=
buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.35.1-20241017141511-7e4bcc0ebba5.1 h1:3G2O21DuY5Y/G32tP1mAI16AxwDYTscG2YaOb/WQty0=
buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.35.1-20241017141511-7e4bcc0ebba5.1/go.mod h1:U4LUlabiYNYBd1pqYS9o8SsHjBRoEBysrfRVnebzJH0=
+buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.35.2-00000000000000-e09c7fd3fe26.1 h1:Twi169wrd7ssCnK27Bymlytv5LmvwFV0zhKhJ64nCYM=
+buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.35.2-00000000000000-e09c7fd3fe26.1/go.mod h1:PWzMbPHJ+Y31iNFrtSc5vy/wvm2805ZXyDZndzzFLa0=
+buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.36.1-00000000000000-1f40f333891d.1 h1:CSMft5/33d/88j3ziC4zid4DOP7X1Xv71I6pW3BUOvA=
+buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.36.1-00000000000000-1f40f333891d.1/go.mod h1:7azHjtjY3sk38xuZGlf2X6DpAPgQMoeZZMix+JkqsdU=
+buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.36.2-00000000000000-1f40f333891d.1 h1:cRvRFDg3/KPgEB2+8/orNwCWBhZO0wVZKij4TTKBj9w=
+buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.36.2-00000000000000-1f40f333891d.1/go.mod h1:oB3M+Fq9RgyUWGMqYk2FqRobQpdH1yZQZ9TYOoc4yIw=
+buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.36.2-00000000000000-42cbdd5aad4c.1 h1:GnqNuwC6UjXvtjGscDekiO+/lstY7NWOILlsOMGNpC4=
+buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.36.2-00000000000000-42cbdd5aad4c.1/go.mod h1:oB3M+Fq9RgyUWGMqYk2FqRobQpdH1yZQZ9TYOoc4yIw=
buf.build/gen/go/astria/primitives/protocolbuffers/go v1.35.1-20240911152449-eeebd3decdce.1 h1:kG4riHqlF9X6iZ1Oxs5/6ul6aue7MS+A6DK6HAchuTk=
buf.build/gen/go/astria/primitives/protocolbuffers/go v1.35.1-20240911152449-eeebd3decdce.1/go.mod h1:n9L7X3VAj4od4VHf2ScJuHARUUQTSxJqtRHZk/7Ptt0=
+buf.build/gen/go/astria/primitives/protocolbuffers/go v1.35.2-00000000000000-2f2e9ce53f59.1 h1:C1bT0G1In6Z6tBERd1XqwDjdxTK+PatSOJYlVk5Is60=
+buf.build/gen/go/astria/primitives/protocolbuffers/go v1.35.2-00000000000000-2f2e9ce53f59.1/go.mod h1:I9FcB1oNqT1nI+ny0GD8gF9YrIYrHmczgNu6MTE9fAo=
+buf.build/gen/go/astria/primitives/protocolbuffers/go v1.36.1-00000000000000-9a039a6ed8db.1 h1:v+RKpd5zE6rqOMA44OLRpDLPYlakjmddvmFFrKxzb48=
+buf.build/gen/go/astria/primitives/protocolbuffers/go v1.36.1-00000000000000-9a039a6ed8db.1/go.mod h1:HnX2FkSKZuD3zPFBR+Q17WzloqvIbFd0pYE++or/x2Q=
+buf.build/gen/go/astria/primitives/protocolbuffers/go v1.36.2-00000000000000-9a039a6ed8db.1 h1:inT/lOAbHunpGP9YLqtAQNssrxEIgH/OmxXNwbXjUqs=
+buf.build/gen/go/astria/primitives/protocolbuffers/go v1.36.2-00000000000000-9a039a6ed8db.1/go.mod h1:Lk1TBSGhOGvbtj0lb7eTeq+Z4N86/67Ay+WWxbqhh6s=
buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.35.1-20241017141511-71aab1871615.1 h1:hPMoxTiT7jJjnIbWqneBbL05VeVOTD9UeC/qdvzHL8g=
buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.35.1-20241017141511-71aab1871615.1/go.mod h1:2uasRFMH+a3DaF34c1o+w7/YtYnoknmARyYpb9W2QIc=
+buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.35.2-00000000000000-0eda7df0ee38.1 h1:uJm/22xugluY5AL2NkIDbNEFBxzN6UcI8vts/bGEDBs=
+buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.35.2-00000000000000-0eda7df0ee38.1/go.mod h1:1Z9P18WNTOT+KvLlc0+2FkcBJ7l5eRUUFcnOxHmLeRA=
+buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.36.1-00000000000000-e54e1c9ad405.1 h1:querphz/TCGphT0qGG4DJo6p8qAsfL5/8SEBgfemVhk=
+buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.36.1-00000000000000-e54e1c9ad405.1/go.mod h1:D6ou7OxkQXmiZDDNNrT147dA9wC9rhJPchCIfVbw9wM=
+buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.36.2-00000000000000-e54e1c9ad405.1 h1:n2embOKwJS+YIyjHRDvOAo7c/kuv3fw9U+gQ/g2Yis8=
+buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.36.2-00000000000000-e54e1c9ad405.1/go.mod h1:dHPKfn7RW6FSo7EkD0LqPhZUmRm5NXMB+tWvTrTnZTQ=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
@@ -39,6 +69,8 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+filippo.io/edwards25519 v1.0.0 h1:0wAIcmJUqRdI8IJ/3eGi5/HwXZWPujYXXlkrQogz0Ek=
+filippo.io/edwards25519 v1.0.0/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 h1:8q4SaHjFsClSvuVne0ID/5Ka8u3fcIHyqkLjcFpNRHQ=
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q=
github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg=
@@ -107,6 +139,8 @@ github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13P
github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M=
github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd h1:js1gPwhcFflTZ7Nzl7WHaOTlTr5hIrR4n1NM4v9n4Kw=
github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd/go.mod h1:nm3Bko6zh6bWP60UxwoT5LzdGJsQJaPo6HjduXq9p6A=
+github.com/btcsuite/btcd v0.24.2 h1:aLmxPguqxza+4ag8R1I2nnJjSu2iFn/kqtHTIImswcY=
+github.com/btcsuite/btcd v0.24.2/go.mod h1:5C8ChTkl5ejr3WHj8tkQSCmydiMEPB0ZhQhehpq7Dgg=
github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA=
github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE=
github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k=
@@ -115,6 +149,8 @@ github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9Ur
github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE=
github.com/btcsuite/btcd/btcutil v1.1.5 h1:+wER79R5670vs/ZusMTF1yTcRYE5GUsFbdjdisflzM8=
github.com/btcsuite/btcd/btcutil v1.1.5/go.mod h1:PSZZ4UitpLBWzxGd5VGOrLnmOjtPP/a6HaFo12zMs00=
+github.com/btcsuite/btcd/btcutil v1.1.6 h1:zFL2+c3Lb9gEgqKNzowKUPQNb8jV7v5Oaodi/AYFd6c=
+github.com/btcsuite/btcd/btcutil v1.1.6/go.mod h1:9dFymx8HpuLqBnsPELrImQeTQfKBQqzqGbbV3jK55aE=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ=
@@ -339,6 +375,8 @@ github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZn
github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU=
+github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo=
github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 h1:X4egAf/gcS1zATw6wn4Ej8vjuVGxeHdan+bRb2ebyv4=
github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
@@ -535,11 +573,15 @@ github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobt
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4=
@@ -897,6 +939,12 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io=
+google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
+google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
+google.golang.org/protobuf v1.36.2 h1:R8FeyR1/eLmkutZOM5CWghmo5itiG9z0ktFlTVLuTmU=
+google.golang.org/protobuf v1.36.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/grpc/execution/server.go b/grpc/execution/server.go
index ea63ca35a..1bb17cb1e 100644
--- a/grpc/execution/server.go
+++ b/grpc/execution/server.go
@@ -7,25 +7,23 @@ package execution
import (
"context"
"crypto/sha256"
- "errors"
"fmt"
- "math/big"
+ "github.com/ethereum/go-ethereum/eth"
+ "github.com/ethereum/go-ethereum/grpc/shared"
+ "github.com/ethereum/go-ethereum/params"
"sync"
"time"
astriaGrpc "buf.build/gen/go/astria/execution-apis/grpc/go/astria/execution/v1/executionv1grpc"
- optimsticPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/bundle/v1alpha1"
astriaPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/execution/v1"
primitivev1 "buf.build/gen/go/astria/primitives/protocolbuffers/go/astria/primitive/v1"
"github.com/ethereum/go-ethereum/beacon/engine"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/eth"
"github.com/ethereum/go-ethereum/log"
"github.com/ethereum/go-ethereum/metrics"
"github.com/ethereum/go-ethereum/miner"
- "github.com/ethereum/go-ethereum/params"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
"google.golang.org/protobuf/types/known/timestamppb"
@@ -38,140 +36,55 @@ type ExecutionServiceServerV1 struct {
// UnimplementedExecutionServiceServer for forward compatibility
astriaGrpc.UnimplementedExecutionServiceServer
- eth *eth.Ethereum
- bc *core.BlockChain
-
- commitmentUpdateLock sync.Mutex // Lock for the forkChoiceUpdated method
- blockExecutionLock sync.Mutex // Lock for the NewPayload method
-
- genesisInfoCalled bool
- getCommitmentStateCalled bool
-
- bridgeAddresses map[string]*params.AstriaBridgeAddressConfig // astria bridge addess to config for that bridge account
- bridgeAllowedAssets map[string]struct{} // a set of allowed asset IDs structs are left empty
-
- nextFeeRecipient common.Address // Fee recipient for the next block
+ sharedServiceContainer *shared.SharedServiceContainer
}
var (
- getGenesisInfoRequestCount = metrics.GetOrRegisterCounter("astria/execution/get_genesis_info_requests", nil)
- getGenesisInfoSuccessCount = metrics.GetOrRegisterCounter("astria/execution/get_genesis_info_success", nil)
- getBlockRequestCount = metrics.GetOrRegisterCounter("astria/execution/get_block_requests", nil)
- getBlockSuccessCount = metrics.GetOrRegisterCounter("astria/execution/get_block_success", nil)
- batchGetBlockRequestCount = metrics.GetOrRegisterCounter("astria/execution/batch_get_block_requests", nil)
- batchGetBlockSuccessCount = metrics.GetOrRegisterCounter("astria/execution/batch_get_block_success", nil)
- executeBlockRequestCount = metrics.GetOrRegisterCounter("astria/execution/execute_block_requests", nil)
- executeBlockSuccessCount = metrics.GetOrRegisterCounter("astria/execution/execute_block_success", nil)
- executeOptimisticBlockRequestCount = metrics.GetOrRegisterCounter("astria/execution/execute_optimistic_block_requests", nil)
- executeOptimisticBlockSuccessCount = metrics.GetOrRegisterCounter("astria/execution/execute_optimistic_block_success", nil)
- getCommitmentStateRequestCount = metrics.GetOrRegisterCounter("astria/execution/get_commitment_state_requests", nil)
- getCommitmentStateSuccessCount = metrics.GetOrRegisterCounter("astria/execution/get_commitment_state_success", nil)
- updateCommitmentStateRequestCount = metrics.GetOrRegisterCounter("astria/execution/update_commitment_state_requests", nil)
- updateCommitmentStateSuccessCount = metrics.GetOrRegisterCounter("astria/execution/update_commitment_state_success", nil)
+ getGenesisInfoRequestCount = metrics.GetOrRegisterCounter("astria/execution/get_genesis_info_requests", nil)
+ getGenesisInfoSuccessCount = metrics.GetOrRegisterCounter("astria/execution/get_genesis_info_success", nil)
+ getBlockRequestCount = metrics.GetOrRegisterCounter("astria/execution/get_block_requests", nil)
+ getBlockSuccessCount = metrics.GetOrRegisterCounter("astria/execution/get_block_success", nil)
+ batchGetBlockRequestCount = metrics.GetOrRegisterCounter("astria/execution/batch_get_block_requests", nil)
+ batchGetBlockSuccessCount = metrics.GetOrRegisterCounter("astria/execution/batch_get_block_success", nil)
+ executeBlockRequestCount = metrics.GetOrRegisterCounter("astria/execution/execute_block_requests", nil)
+ executeBlockSuccessCount = metrics.GetOrRegisterCounter("astria/execution/execute_block_success", nil)
+ getCommitmentStateRequestCount = metrics.GetOrRegisterCounter("astria/execution/get_commitment_state_requests", nil)
+ getCommitmentStateSuccessCount = metrics.GetOrRegisterCounter("astria/execution/get_commitment_state_success", nil)
+ updateCommitmentStateRequestCount = metrics.GetOrRegisterCounter("astria/execution/update_commitment_state_requests", nil)
+ updateCommitmentStateSuccessCount = metrics.GetOrRegisterCounter("astria/execution/update_commitment_state_success", nil)
softCommitmentHeight = metrics.GetOrRegisterGauge("astria/execution/soft_commitment_height", nil)
firmCommitmentHeight = metrics.GetOrRegisterGauge("astria/execution/firm_commitment_height", nil)
totalExecutedTxCount = metrics.GetOrRegisterCounter("astria/execution/total_executed_tx", nil)
- executeBlockTimer = metrics.GetOrRegisterTimer("astria/execution/execute_block_time", nil)
- executionOptimisticBlockTimer = metrics.GetOrRegisterTimer("astria/execution/execute_optimistic_block_time", nil)
- commitmentStateUpdateTimer = metrics.GetOrRegisterTimer("astria/execution/commitment", nil)
+ executeBlockTimer = metrics.GetOrRegisterTimer("astria/execution/execute_block_time", nil)
+ commitmentStateUpdateTimer = metrics.GetOrRegisterTimer("astria/execution/commitment", nil)
)
-func NewExecutionServiceServerV1(eth *eth.Ethereum) (*ExecutionServiceServerV1, error) {
- bc := eth.BlockChain()
-
- if bc.Config().AstriaRollupName == "" {
- return nil, errors.New("rollup name not set")
- }
-
- if bc.Config().AstriaSequencerInitialHeight == 0 {
- return nil, errors.New("sequencer initial height not set")
- }
-
- if bc.Config().AstriaCelestiaInitialHeight == 0 {
- return nil, errors.New("celestia initial height not set")
- }
-
- if bc.Config().AstriaCelestiaHeightVariance == 0 {
- return nil, errors.New("celestia height variance not set")
+func NewExecutionServiceServerV1(sharedServiceContainer *shared.SharedServiceContainer) *ExecutionServiceServerV1 {
+ execServiceServerV1 := &ExecutionServiceServerV1{
+ sharedServiceContainer: sharedServiceContainer,
}
- bridgeAddresses := make(map[string]*params.AstriaBridgeAddressConfig)
- bridgeAllowedAssets := make(map[string]struct{})
- if bc.Config().AstriaBridgeAddressConfigs == nil {
- log.Warn("bridge addresses not set")
- } else {
- nativeBridgeSeen := false
- for _, cfg := range bc.Config().AstriaBridgeAddressConfigs {
- err := cfg.Validate(bc.Config().AstriaSequencerAddressPrefix)
- if err != nil {
- return nil, fmt.Errorf("invalid bridge address config: %w", err)
- }
-
- if cfg.Erc20Asset == nil {
- if nativeBridgeSeen {
- return nil, errors.New("only one native bridge address is allowed")
- }
- nativeBridgeSeen = true
- }
-
- if cfg.Erc20Asset != nil && cfg.SenderAddress == (common.Address{}) {
- return nil, errors.New("astria bridge sender address must be set for bridged ERC20 assets")
- }
-
- bridgeCfg := cfg
- bridgeAddresses[cfg.BridgeAddress] = &bridgeCfg
- bridgeAllowedAssets[cfg.AssetDenom] = struct{}{}
- if cfg.Erc20Asset == nil {
- log.Info("bridge for sequencer native asset initialized", "bridgeAddress", cfg.BridgeAddress, "assetDenom", cfg.AssetDenom)
- } else {
- log.Info("bridge for ERC20 asset initialized", "bridgeAddress", cfg.BridgeAddress, "assetDenom", cfg.AssetDenom, "contractAddress", cfg.Erc20Asset.ContractAddress)
- }
- }
- }
-
- // To decrease compute cost, we identify the next fee recipient at the start
- // and update it as we execute blocks.
- nextFeeRecipient := common.Address{}
- if bc.Config().AstriaFeeCollectors == nil {
- log.Warn("fee asset collectors not set, assets will be burned")
- } else {
- maxHeightCollectorMatch := uint32(0)
- nextBlock := uint32(bc.CurrentBlock().Number.Int64()) + 1
- for height, collector := range bc.Config().AstriaFeeCollectors {
- if height <= nextBlock && height > maxHeightCollectorMatch {
- maxHeightCollectorMatch = height
- nextFeeRecipient = collector
- }
- }
- }
-
- return &ExecutionServiceServerV1{
- eth: eth,
- bc: bc,
- bridgeAddresses: bridgeAddresses,
- bridgeAllowedAssets: bridgeAllowedAssets,
- nextFeeRecipient: nextFeeRecipient,
- }, nil
+ return execServiceServerV1
}
func (s *ExecutionServiceServerV1) GetGenesisInfo(ctx context.Context, req *astriaPb.GetGenesisInfoRequest) (*astriaPb.GenesisInfo, error) {
log.Debug("GetGenesisInfo called")
getGenesisInfoRequestCount.Inc(1)
- rollupHash := sha256.Sum256([]byte(s.bc.Config().AstriaRollupName))
+ rollupHash := sha256.Sum256([]byte(s.Bc().Config().AstriaRollupName))
rollupId := primitivev1.RollupId{Inner: rollupHash[:]}
res := &astriaPb.GenesisInfo{
RollupId: &rollupId,
- SequencerGenesisBlockHeight: s.bc.Config().AstriaSequencerInitialHeight,
- CelestiaBlockVariance: s.bc.Config().AstriaCelestiaHeightVariance,
+ SequencerGenesisBlockHeight: s.Bc().Config().AstriaSequencerInitialHeight,
+ CelestiaBlockVariance: s.Bc().Config().AstriaCelestiaHeightVariance,
}
log.Info("GetGenesisInfo completed", "response", res)
getGenesisInfoSuccessCount.Inc(1)
- s.genesisInfoCalled = true
+ s.SetGenesisInfoCalled(true)
return res, nil
}
@@ -187,7 +100,7 @@ func (s *ExecutionServiceServerV1) GetBlock(ctx context.Context, req *astriaPb.G
res, err := s.getBlockFromIdentifier(req.GetIdentifier())
if err != nil {
log.Error("failed finding block", err)
- return nil, err
+ return nil, shared.WrapError(err, "failed finding block")
}
log.Debug("GetBlock completed", "request", req, "response", res)
@@ -212,7 +125,7 @@ func (s *ExecutionServiceServerV1) BatchGetBlocks(ctx context.Context, req *astr
block, err := s.getBlockFromIdentifier(id)
if err != nil {
log.Error("failed finding block with id", id, "error", err)
- return nil, err
+ return nil, shared.WrapError(err, fmt.Sprintf("failed finding block with id %s", id.String()))
}
blocks = append(blocks, block)
@@ -227,106 +140,6 @@ func (s *ExecutionServiceServerV1) BatchGetBlocks(ctx context.Context, req *astr
return res, nil
}
-func protoU128ToBigInt(u128 *primitivev1.Uint128) *big.Int {
- lo := big.NewInt(0).SetUint64(u128.Lo)
- hi := big.NewInt(0).SetUint64(u128.Hi)
- hi.Lsh(hi, 64)
- return lo.Add(lo, hi)
-}
-
-func (s *ExecutionServiceServerV1) ExecuteOptimisticBlock(ctx context.Context, req *optimsticPb.BaseBlock) (*astriaPb.Block, error) {
- // we need to execute the optimistic block
- log.Debug("ExecuteOptimisticBlock called", "timestamp", req.Timestamp, "sequencer_block_hash", req.SequencerBlockHash)
- executeOptimisticBlockRequestCount.Inc(1)
-
- if err := validateStaticExecuteOptimisticBlockRequest(req); err != nil {
- log.Error("ExecuteOptimisticBlock called with invalid BaseBlock", "err", err)
- return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("BaseBlock is invalid: %s", err.Error()))
- }
-
- if !s.syncMethodsCalled() {
- return nil, status.Error(codes.PermissionDenied, "Cannot execute block until GetGenesisInfo && GetCommitmentState methods are called")
- }
-
- // Deliberately called after lock, to more directly measure the time spent executing
- executionStart := time.Now()
- defer executionOptimisticBlockTimer.UpdateSince(executionStart)
-
- // get the soft block
- softBlock := s.bc.CurrentSafeBlock()
-
- s.blockExecutionLock.Lock()
- nextFeeRecipient := s.nextFeeRecipient
- s.blockExecutionLock.Unlock()
-
- // the height that this block will be at
- height := s.bc.CurrentBlock().Number.Uint64() + 1
-
- txsToProcess := types.Transactions{}
- for _, tx := range req.Transactions {
- unmarshalledTx, err := validateAndUnmarshalSequencerTx(height, tx, s.bridgeAddresses, s.bridgeAllowedAssets)
- if err != nil {
- log.Debug("failed to validate sequencer tx, ignoring", "tx", tx, "err", err)
- continue
- }
-
- err = s.eth.TxPool().ValidateTx(unmarshalledTx)
- if err != nil {
- log.Debug("failed to validate tx, ignoring", "tx", tx, "err", err)
- continue
- }
-
- txsToProcess = append(txsToProcess, unmarshalledTx)
- }
-
- // Build a payload to add to the chain
- payloadAttributes := &miner.BuildPayloadArgs{
- Parent: softBlock.Hash(),
- Timestamp: uint64(req.GetTimestamp().GetSeconds()),
- Random: common.Hash{},
- FeeRecipient: nextFeeRecipient,
- OverrideTransactions: txsToProcess,
- IsOptimisticExecution: true,
- }
- payload, err := s.eth.Miner().BuildPayload(payloadAttributes)
- if err != nil {
- log.Error("failed to build payload", "err", err)
- return nil, status.Error(codes.InvalidArgument, "Could not build block with provided txs")
- }
-
- block, err := engine.ExecutableDataToBlock(*payload.Resolve().ExecutionPayload, nil, nil)
- if err != nil {
- log.Error("failed to convert executable data to block", err)
- return nil, status.Error(codes.Internal, "failed to execute block")
- }
-
- // this will insert the optimistic block into the chain and persist it's state without
- // setting it as the HEAD.
- err = s.bc.InsertBlockWithoutSetHead(block)
- if err != nil {
- log.Error("failed to insert block to chain", "hash", block.Hash(), "prevHash", block.ParentHash(), "err", err)
- return nil, status.Error(codes.Internal, "failed to insert block to chain")
- }
-
- // we store a pointer to the optimistic block in the chain so that we can use it
- // to retrieve the state of the optimistic block
- s.bc.SetOptimistic(block)
-
- res := &astriaPb.Block{
- Number: uint32(block.NumberU64()),
- Hash: block.Hash().Bytes(),
- ParentBlockHash: block.ParentHash().Bytes(),
- Timestamp: &timestamppb.Timestamp{
- Seconds: int64(block.Time()),
- },
- }
-
- log.Info("ExecuteOptimisticBlock completed", "block_num", res.Number, "timestamp", res.Timestamp)
- executeOptimisticBlockSuccessCount.Inc(1)
-
- return res, nil
-}
-
// ExecuteBlock drives deterministic derivation of a rollup block from sequencer
// block data
func (s *ExecutionServiceServerV1) ExecuteBlock(ctx context.Context, req *astriaPb.ExecuteBlockRequest) (*astriaPb.Block, error) {
@@ -337,53 +150,47 @@ func (s *ExecutionServiceServerV1) ExecuteBlock(ctx context.Context, req *astria
log.Debug("ExecuteBlock called", "prevBlockHash", common.BytesToHash(req.PrevBlockHash), "tx_count", len(req.Transactions), "timestamp", req.Timestamp)
executeBlockRequestCount.Inc(1)
- s.blockExecutionLock.Lock()
- defer s.blockExecutionLock.Unlock()
+ s.BlockExecutionLock().Lock()
+ defer s.BlockExecutionLock().Unlock()
// Deliberately called after lock, to more directly measure the time spent executing
executionStart := time.Now()
defer executeBlockTimer.UpdateSince(executionStart)
- if !s.syncMethodsCalled() {
+ if !s.SyncMethodsCalled() {
return nil, status.Error(codes.PermissionDenied, "Cannot execute block until GetGenesisInfo && GetCommitmentState methods are called")
}
// Validate block being created has valid previous hash
prevHeadHash := common.BytesToHash(req.PrevBlockHash)
- softHash := s.bc.CurrentSafeBlock().Hash()
+ softHash := s.Bc().CurrentSafeBlock().Hash()
if prevHeadHash != softHash {
return nil, status.Error(codes.FailedPrecondition, "Block can only be created on top of soft block.")
}
// the height that this block will be at
- height := s.bc.CurrentBlock().Number.Uint64() + 1
+ height := s.Bc().CurrentBlock().Number.Uint64() + 1
- txsToProcess := types.Transactions{}
- for _, tx := range req.Transactions {
- unmarshalledTx, err := validateAndUnmarshalSequencerTx(height, tx, s.bridgeAddresses, s.bridgeAllowedAssets)
- if err != nil {
- log.Debug("failed to validate sequencer tx, ignoring", "tx", tx, "err", err)
- continue
- }
- txsToProcess = append(txsToProcess, unmarshalledTx)
- }
+ addressPrefix := s.Bc().Config().AstriaSequencerAddressPrefix
+
+ txsToProcess := shared.UnbundleRollupDataTransactions(req.Transactions, height, s.BridgeAddresses(), s.BridgeAllowedAssets(), prevHeadHash.Bytes(), s.AuctioneerAddress(), addressPrefix)
// This set of ordered TXs on the TxPool has been configured to be used by
// the Miner when building a payload.
- s.eth.TxPool().SetAstriaOrdered(txsToProcess)
+ s.Eth().TxPool().SetAstriaOrdered(txsToProcess)
// Build a payload to add to the chain
payloadAttributes := &miner.BuildPayloadArgs{
Parent: prevHeadHash,
Timestamp: uint64(req.GetTimestamp().GetSeconds()),
Random: common.Hash{},
- FeeRecipient: s.nextFeeRecipient,
+ FeeRecipient: s.NextFeeRecipient(),
OverrideTransactions: types.Transactions{},
IsOptimisticExecution: false,
}
- payload, err := s.eth.Miner().BuildPayload(payloadAttributes)
+ payload, err := s.Eth().Miner().BuildPayload(payloadAttributes)
if err != nil {
log.Error("failed to build payload", "err", err)
- return nil, status.Error(codes.InvalidArgument, "Could not build block with provided txs")
+ return nil, status.Error(codes.InvalidArgument, shared.WrapError(err, "Could not build block with provided txs").Error())
}
// call blockchain.InsertChain to actually execute and write the blocks to
@@ -391,16 +198,16 @@ func (s *ExecutionServiceServerV1) ExecuteBlock(ctx context.Context, req *astria
block, err := engine.ExecutableDataToBlock(*payload.Resolve().ExecutionPayload, nil, nil)
if err != nil {
log.Error("failed to convert executable data to block", err)
- return nil, status.Error(codes.Internal, "failed to execute block")
+ return nil, status.Error(codes.Internal, shared.WrapError(err, "failed to convert executable data to block").Error())
}
- err = s.bc.InsertBlockWithoutSetHead(block)
+ err = s.Bc().InsertBlockWithoutSetHead(block)
if err != nil {
log.Error("failed to insert block to chain", "hash", block.Hash(), "prevHash", req.PrevBlockHash, "err", err)
- return nil, status.Error(codes.Internal, "failed to insert block to chain")
+ return nil, status.Error(codes.Internal, shared.WrapError(err, "failed to insert block to chain").Error())
}
// remove txs from original mempool
- s.eth.TxPool().ClearAstriaOrdered()
+ s.Eth().TxPool().ClearAstriaOrdered()
res := &astriaPb.Block{
Number: uint32(block.NumberU64()),
@@ -411,8 +218,16 @@ func (s *ExecutionServiceServerV1) ExecuteBlock(ctx context.Context, req *astria
},
}
- if next, ok := s.bc.Config().AstriaFeeCollectors[res.Number+1]; ok {
- s.nextFeeRecipient = next
+ if next, ok := s.Bc().Config().AstriaFeeCollectors[res.Number+1]; ok {
+ s.SetNextFeeRecipient(next)
+ }
+
+ if address, ok := s.Bc().Config().AstriaAuctioneerAddresses[res.Number+1]; ok {
+ if err := shared.ValidateBech32mAddress(address, addressPrefix); err != nil {
+ log.Error("auctioneer address is not a valid bech32 address", "block", res.Number+1, "address", address)
+ }
+
+ s.SetAuctioneerAddress(address)
}
log.Info("ExecuteBlock completed", "block_num", res.Number, "timestamp", res.Timestamp)
@@ -426,18 +241,18 @@ func (s *ExecutionServiceServerV1) GetCommitmentState(ctx context.Context, req *
log.Info("GetCommitmentState called")
getCommitmentStateRequestCount.Inc(1)
- softBlock, err := ethHeaderToExecutionBlock(s.bc.CurrentSafeBlock())
+ softBlock, err := ethHeaderToExecutionBlock(s.Bc().CurrentSafeBlock())
if err != nil {
log.Error("error finding safe block", err)
- return nil, status.Error(codes.Internal, "could not locate soft block")
+ return nil, status.Error(codes.Internal, shared.WrapError(err, "could not locate soft block").Error())
}
- firmBlock, err := ethHeaderToExecutionBlock(s.bc.CurrentFinalBlock())
+ firmBlock, err := ethHeaderToExecutionBlock(s.Bc().CurrentFinalBlock())
if err != nil {
log.Error("error finding final block", err)
- return nil, status.Error(codes.Internal, "could not locate firm block")
+ return nil, status.Error(codes.Internal, shared.WrapError(err, "could not locate firm block").Error())
}
- celestiaBlock := s.bc.CurrentBaseCelestiaHeight()
+ celestiaBlock := s.Bc().CurrentBaseCelestiaHeight()
res := &astriaPb.CommitmentState{
Soft: softBlock,
@@ -447,7 +262,7 @@ func (s *ExecutionServiceServerV1) GetCommitmentState(ctx context.Context, req *
log.Info("GetCommitmentState completed", "soft_height", res.Soft.Number, "firm_height", res.Firm.Number, "base_celestia_height", res.BaseCelestiaHeight)
getCommitmentStateSuccessCount.Inc(1)
- s.getCommitmentStateCalled = true
+ s.SetGetCommitmentStateCalled(true)
return res, nil
}
@@ -464,15 +279,15 @@ func (s *ExecutionServiceServerV1) UpdateCommitmentState(ctx context.Context, re
commitmentUpdateStart := time.Now()
defer commitmentStateUpdateTimer.UpdateSince(commitmentUpdateStart)
- s.commitmentUpdateLock.Lock()
- defer s.commitmentUpdateLock.Unlock()
+ s.CommitmentUpdateLock().Lock()
+ defer s.CommitmentUpdateLock().Unlock()
- if !s.syncMethodsCalled() {
+ if !s.SyncMethodsCalled() {
return nil, status.Error(codes.PermissionDenied, "Cannot update commitment state until GetGenesisInfo && GetCommitmentState methods are called")
}
- if s.bc.CurrentBaseCelestiaHeight() > req.CommitmentState.BaseCelestiaHeight {
- errStr := fmt.Sprintf("Base Celestia height cannot be decreased, current_base_celestia_height: %d, new_base_celestia_height: %d", s.bc.CurrentBaseCelestiaHeight(), req.CommitmentState.BaseCelestiaHeight)
+ if s.Bc().CurrentBaseCelestiaHeight() > req.CommitmentState.BaseCelestiaHeight {
+ errStr := fmt.Sprintf("Base Celestia height cannot be decreased, current_base_celestia_height: %d, new_base_celestia_height: %d", s.Bc().CurrentBaseCelestiaHeight(), req.CommitmentState.BaseCelestiaHeight)
return nil, status.Error(codes.InvalidArgument, errStr)
}
@@ -480,50 +295,50 @@ func (s *ExecutionServiceServerV1) UpdateCommitmentState(ctx context.Context, re
firmEthHash := common.BytesToHash(req.CommitmentState.Firm.Hash)
// Validate that the firm and soft blocks exist before going further
- softBlock := s.bc.GetBlockByHash(softEthHash)
+ softBlock := s.Bc().GetBlockByHash(softEthHash)
if softBlock == nil {
return nil, status.Error(codes.InvalidArgument, "Soft block specified does not exist")
}
- firmBlock := s.bc.GetBlockByHash(firmEthHash)
+ firmBlock := s.Bc().GetBlockByHash(firmEthHash)
if firmBlock == nil {
return nil, status.Error(codes.InvalidArgument, "Firm block specified does not exist")
}
- currentHead := s.bc.CurrentBlock().Hash()
+ currentHead := s.Bc().CurrentBlock().Hash()
// Update the canonical chain to soft block. We must do this before last
// validation step since there is no way to check if firm block descends from
// anything but the canonical chain
if currentHead != softEthHash {
- if _, err := s.bc.SetCanonical(softBlock); err != nil {
+ if _, err := s.Bc().SetCanonical(softBlock); err != nil {
log.Error("failed updating canonical chain to soft block", err)
- return nil, status.Error(codes.Internal, "Could not update head to safe hash")
+ return nil, status.Error(codes.Internal, shared.WrapError(err, "Could not update head to safe hash").Error())
}
}
// Once head is updated validate that firm belongs to chain
- rollbackBlock := s.bc.GetBlockByHash(currentHead)
- if s.bc.GetCanonicalHash(firmBlock.NumberU64()) != firmEthHash {
+ rollbackBlock := s.Bc().GetBlockByHash(currentHead)
+ if s.Bc().GetCanonicalHash(firmBlock.NumberU64()) != firmEthHash {
log.Error("firm block not found in canonical chain defined by soft block, rolling back")
- if _, err := s.bc.SetCanonical(rollbackBlock); err != nil {
+ if _, err := s.Bc().SetCanonical(rollbackBlock); err != nil {
panic("rollback to previous head after failed validation failed")
}
return nil, status.Error(codes.InvalidArgument, "soft block in request is not a descendant of the current firmly committed block")
}
- s.eth.SetSynced()
+ s.Eth().SetSynced()
// Updating the safe and final after everything validated
- currentSafe := s.bc.CurrentSafeBlock().Hash()
+ currentSafe := s.Bc().CurrentSafeBlock().Hash()
if currentSafe != softEthHash {
- s.bc.SetSafe(softBlock.Header())
+ s.Bc().SetSafe(softBlock.Header())
}
- currentFirm := s.bc.CurrentFinalBlock().Hash()
+ currentFirm := s.Bc().CurrentFinalBlock().Hash()
if currentFirm != firmEthHash {
- s.bc.SetCelestiaFinalized(firmBlock.Header(), req.CommitmentState.BaseCelestiaHeight)
+ s.Bc().SetCelestiaFinalized(firmBlock.Header(), req.CommitmentState.BaseCelestiaHeight)
}
log.Info("UpdateCommitmentState completed", "soft_height", softBlock.NumberU64(), "firm_height", firmBlock.NumberU64())
@@ -539,9 +354,9 @@ func (s *ExecutionServiceServerV1) getBlockFromIdentifier(identifier *astriaPb.B
// Grab the header based on the identifier provided
switch idType := identifier.Identifier.(type) {
case *astriaPb.BlockIdentifier_BlockNumber:
- header = s.bc.GetHeaderByNumber(uint64(identifier.GetBlockNumber()))
+ header = s.Bc().GetHeaderByNumber(uint64(identifier.GetBlockNumber()))
case *astriaPb.BlockIdentifier_BlockHash:
- header = s.bc.GetHeaderByHash(common.BytesToHash(identifier.GetBlockHash()))
+ header = s.Bc().GetHeaderByHash(common.BytesToHash(identifier.GetBlockHash()))
default:
return nil, status.Errorf(codes.InvalidArgument, "identifier has unexpected type %T", idType)
}
@@ -553,7 +368,7 @@ func (s *ExecutionServiceServerV1) getBlockFromIdentifier(identifier *astriaPb.B
res, err := ethHeaderToExecutionBlock(header)
if err != nil {
// This should never happen since we validate header exists above.
- return nil, status.Error(codes.Internal, "internal error")
+ return nil, status.Error(codes.Internal, shared.WrapError(err, "internal error").Error())
}
return res, nil
@@ -574,6 +389,62 @@ func ethHeaderToExecutionBlock(header *types.Header) (*astriaPb.Block, error) {
}, nil
}
-func (s *ExecutionServiceServerV1) syncMethodsCalled() bool {
- return s.genesisInfoCalled && s.getCommitmentStateCalled
+func (s *ExecutionServiceServerV1) Eth() *eth.Ethereum {
+ return s.sharedServiceContainer.Eth()
+}
+
+func (s *ExecutionServiceServerV1) Bc() *core.BlockChain {
+ return s.sharedServiceContainer.Bc()
+}
+
+func (s *ExecutionServiceServerV1) SetGenesisInfoCalled(value bool) {
+ s.sharedServiceContainer.SetGenesisInfoCalled(value)
+}
+
+func (s *ExecutionServiceServerV1) GenesisInfoCalled() bool {
+ return s.sharedServiceContainer.GenesisInfoCalled()
+}
+
+func (s *ExecutionServiceServerV1) SetGetCommitmentStateCalled(value bool) {
+ s.sharedServiceContainer.SetGetCommitmentStateCalled(value)
+}
+
+func (s *ExecutionServiceServerV1) CommitmentStateCalled() bool {
+ return s.sharedServiceContainer.CommitmentStateCalled()
+}
+
+func (s *ExecutionServiceServerV1) CommitmentUpdateLock() *sync.Mutex {
+ return s.sharedServiceContainer.CommitmentUpdateLock()
+}
+
+func (s *ExecutionServiceServerV1) BlockExecutionLock() *sync.Mutex {
+ return s.sharedServiceContainer.BlockExecutionLock()
+}
+
+func (s *ExecutionServiceServerV1) NextFeeRecipient() common.Address {
+ return s.sharedServiceContainer.NextFeeRecipient()
+}
+
+func (s *ExecutionServiceServerV1) SetNextFeeRecipient(feeRecipient common.Address) {
+ s.sharedServiceContainer.SetNextFeeRecipient(feeRecipient)
+}
+
+func (s *ExecutionServiceServerV1) BridgeAddresses() map[string]*params.AstriaBridgeAddressConfig {
+ return s.sharedServiceContainer.BridgeAddresses()
+}
+
+func (s *ExecutionServiceServerV1) BridgeAllowedAssets() map[string]struct{} {
+ return s.sharedServiceContainer.BridgeAllowedAssets()
+}
+
+func (s *ExecutionServiceServerV1) SyncMethodsCalled() bool {
+ return s.sharedServiceContainer.SyncMethodsCalled()
+}
+
+func (s *ExecutionServiceServerV1) AuctioneerAddress() string {
+ return s.sharedServiceContainer.AuctioneerAddress()
+}
+
+func (s *ExecutionServiceServerV1) SetAuctioneerAddress(auctioneerAddress string) {
+ s.sharedServiceContainer.SetAuctioneerAddress(auctioneerAddress)
}
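
Note on the refactor above: ExecutionServiceServerV1 no longer owns the chain handles, locks, and sync flags directly; it holds a single *shared.SharedServiceContainer and exposes thin accessors so the execution, optimistic, and auction services all observe the same state. The sketch below is an assumed shape for that container, reconstructed from the fields removed from the server in this diff; the real type in grpc/shared may differ.

package shared

import (
	"sync"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/eth"
	"github.com/ethereum/go-ethereum/params"
)

// SharedServiceContainer (sketch) bundles the state that used to live directly
// on ExecutionServiceServerV1 so that multiple gRPC services can share it.
type SharedServiceContainer struct {
	eth *eth.Ethereum
	bc  *core.BlockChain

	commitmentUpdateLock sync.Mutex
	blockExecutionLock   sync.Mutex

	genesisInfoCalled        bool
	getCommitmentStateCalled bool

	bridgeAddresses     map[string]*params.AstriaBridgeAddressConfig
	bridgeAllowedAssets map[string]struct{}

	nextFeeRecipient  common.Address
	auctioneerAddress string
}

// Accessors hand out the shared handles and locks so every service sees the
// same instances.
func (c *SharedServiceContainer) Eth() *eth.Ethereum              { return c.eth }
func (c *SharedServiceContainer) Bc() *core.BlockChain            { return c.bc }
func (c *SharedServiceContainer) BlockExecutionLock() *sync.Mutex { return &c.blockExecutionLock }
func (c *SharedServiceContainer) CommitmentUpdateLock() *sync.Mutex {
	return &c.commitmentUpdateLock
}

// SyncMethodsCalled mirrors the old syncMethodsCalled helper: both GetGenesisInfo
// and GetCommitmentState must have been called before blocks may be executed.
func (c *SharedServiceContainer) SyncMethodsCalled() bool {
	return c.genesisInfoCalled && c.getCommitmentStateCalled
}
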
diff --git a/grpc/execution/server_test.go b/grpc/execution/server_test.go
index 2dec2e861..211be906b 100644
--- a/grpc/execution/server_test.go
+++ b/grpc/execution/server_test.go
@@ -1,7 +1,6 @@
package execution
import (
- optimsticPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/bundle/v1alpha1"
astriaPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/execution/v1"
primitivev1 "buf.build/gen/go/astria/primitives/protocolbuffers/go/astria/primitive/v1"
sequencerblockv1 "buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go/astria/sequencerblock/v1"
@@ -9,9 +8,9 @@ import (
"context"
"crypto/sha256"
"github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/grpc/shared"
"github.com/ethereum/go-ethereum/params"
"github.com/holiman/uint256"
"github.com/stretchr/testify/require"
@@ -20,13 +19,13 @@ import (
"google.golang.org/protobuf/types/known/timestamppb"
"math/big"
"testing"
- "time"
)
-func TestExecutionService_GetGenesisInfo(t *testing.T) {
- ethservice, serviceV1Alpha1 := setupExecutionService(t, 10)
+func TestExecutionServiceV1_GetGenesisInfo(t *testing.T) {
+ ethservice, sharedServiceContainer, _, _ := shared.SetupSharedService(t, 10)
+ serviceV1 := SetupExecutionService(t, sharedServiceContainer)
- genesisInfo, err := serviceV1Alpha1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{})
+ genesisInfo, err := serviceV1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{})
require.Nil(t, err, "GetGenesisInfo failed")
hashedRollupId := sha256.Sum256([]byte(ethservice.BlockChain().Config().AstriaRollupName))
@@ -34,13 +33,14 @@ func TestExecutionService_GetGenesisInfo(t *testing.T) {
require.True(t, bytes.Equal(genesisInfo.RollupId.Inner, hashedRollupId[:]), "RollupId is not correct")
require.Equal(t, genesisInfo.GetSequencerGenesisBlockHeight(), ethservice.BlockChain().Config().AstriaSequencerInitialHeight, "SequencerInitialHeight is not correct")
require.Equal(t, genesisInfo.GetCelestiaBlockVariance(), ethservice.BlockChain().Config().AstriaCelestiaHeightVariance, "CelestiaHeightVariance is not correct")
- require.True(t, serviceV1Alpha1.genesisInfoCalled, "GetGenesisInfo should be called")
+ require.True(t, serviceV1.sharedServiceContainer.GenesisInfoCalled(), "GetGenesisInfo should be called")
}
-func TestExecutionServiceServerV1Alpha2_GetCommitmentState(t *testing.T) {
- ethservice, serviceV1Alpha1 := setupExecutionService(t, 10)
+func TestExecutionServiceServerV1_GetCommitmentState(t *testing.T) {
+ ethservice, sharedServiceContainer, _, _ := shared.SetupSharedService(t, 10)
+ serviceV1 := SetupExecutionService(t, sharedServiceContainer)
- commitmentState, err := serviceV1Alpha1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{})
+ commitmentState, err := serviceV1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{})
require.Nil(t, err, "GetCommitmentState failed")
require.NotNil(t, commitmentState, "CommitmentState is nil")
@@ -60,11 +60,12 @@ func TestExecutionServiceServerV1Alpha2_GetCommitmentState(t *testing.T) {
require.Equal(t, uint64(commitmentState.Firm.Number), firmBlock.Number.Uint64(), "Firm Block Number do not match")
require.Equal(t, commitmentState.BaseCelestiaHeight, ethservice.BlockChain().Config().AstriaCelestiaInitialHeight, "BaseCelestiaHeight is not correct")
- require.True(t, serviceV1Alpha1.getCommitmentStateCalled, "GetCommitmentState should be called")
+ require.True(t, serviceV1.sharedServiceContainer.CommitmentStateCalled(), "GetCommitmentState should be called")
}
-func TestExecutionService_GetBlock(t *testing.T) {
- ethservice, serviceV1Alpha1 := setupExecutionService(t, 10)
+func TestExecutionServiceV1_GetBlock(t *testing.T) {
+ ethservice, sharedServiceContainer, _, _ := shared.SetupSharedService(t, 10)
+ serviceV1 := SetupExecutionService(t, sharedServiceContainer)
tests := []struct {
description string
@@ -96,7 +97,7 @@ func TestExecutionService_GetBlock(t *testing.T) {
for _, tt := range tests {
t.Run(tt.description, func(t *testing.T) {
- blockInfo, err := serviceV1Alpha1.GetBlock(context.Background(), tt.getBlockRequst)
+ blockInfo, err := serviceV1.GetBlock(context.Background(), tt.getBlockRequst)
if tt.expectedReturnCode > 0 {
require.NotNil(t, err, "GetBlock should return an error")
require.Equal(t, tt.expectedReturnCode, status.Code(err), "GetBlock failed")
@@ -122,8 +123,9 @@ func TestExecutionService_GetBlock(t *testing.T) {
}
}
-func TestExecutionServiceServerV1Alpha2_BatchGetBlocks(t *testing.T) {
- ethservice, serviceV1Alpha1 := setupExecutionService(t, 10)
+func TestExecutionServiceServerV1_BatchGetBlocks(t *testing.T) {
+ ethservice, sharedServiceContainer, _, _ := shared.SetupSharedService(t, 10)
+ serviceV1 := SetupExecutionService(t, sharedServiceContainer)
tests := []struct {
description string
@@ -173,7 +175,7 @@ func TestExecutionServiceServerV1Alpha2_BatchGetBlocks(t *testing.T) {
for _, tt := range tests {
t.Run(tt.description, func(t *testing.T) {
- batchBlocksRes, err := serviceV1Alpha1.BatchGetBlocks(context.Background(), tt.batchGetBlockRequest)
+ batchBlocksRes, err := serviceV1.BatchGetBlocks(context.Background(), tt.batchGetBlockRequest)
if tt.expectedReturnCode > 0 {
require.NotNil(t, err, "BatchGetBlocks should return an error")
require.Equal(t, tt.expectedReturnCode, status.Code(err), "BatchGetBlocks failed")
@@ -193,179 +195,8 @@ func TestExecutionServiceServerV1Alpha2_BatchGetBlocks(t *testing.T) {
}
}
-func bigIntToProtoU128(i *big.Int) *primitivev1.Uint128 {
- lo := i.Uint64()
- hi := new(big.Int).Rsh(i, 64).Uint64()
- return &primitivev1.Uint128{Lo: lo, Hi: hi}
-}
-
-func TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlock(t *testing.T) {
- ethservice, _ := setupExecutionService(t, 10)
-
- tests := []struct {
- description string
- callGenesisInfoAndGetCommitmentState bool
- numberOfTxs int
- prevBlockHash []byte
- timestamp uint64
- depositTxAmount *big.Int // if this is non zero then we send a deposit tx
- expectedReturnCode codes.Code
- }{
- {
- description: "ExecuteOptimisticBlock without calling GetGenesisInfo and GetCommitmentState",
- callGenesisInfoAndGetCommitmentState: false,
- numberOfTxs: 5,
- prevBlockHash: ethservice.BlockChain().GetBlockByNumber(2).Hash().Bytes(),
- timestamp: ethservice.BlockChain().GetBlockByNumber(2).Time() + 2,
- depositTxAmount: big.NewInt(0),
- expectedReturnCode: codes.PermissionDenied,
- },
- {
- description: "ExecuteOptimisticBlock with 5 txs and no deposit tx",
- callGenesisInfoAndGetCommitmentState: true,
- numberOfTxs: 5,
- prevBlockHash: ethservice.BlockChain().CurrentSafeBlock().Hash().Bytes(),
- timestamp: ethservice.BlockChain().CurrentSafeBlock().Time + 2,
- depositTxAmount: big.NewInt(0),
- expectedReturnCode: 0,
- },
- {
- description: "ExecuteOptimisticBlock with 5 txs and a deposit tx",
- callGenesisInfoAndGetCommitmentState: true,
- numberOfTxs: 5,
- prevBlockHash: ethservice.BlockChain().CurrentSafeBlock().Hash().Bytes(),
- timestamp: ethservice.BlockChain().CurrentSafeBlock().Time + 2,
- depositTxAmount: big.NewInt(1000000000000000000),
- expectedReturnCode: 0,
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.description, func(t *testing.T) {
- // reset the blockchain with each test
- ethservice, serviceV1Alpha1 := setupExecutionService(t, 10)
-
- var err error // adding this to prevent shadowing of genesisInfo in the below if branch
- var genesisInfo *astriaPb.GenesisInfo
- var commitmentStateBeforeExecuteBlock *astriaPb.CommitmentState
- if tt.callGenesisInfoAndGetCommitmentState {
- // call getGenesisInfo and getCommitmentState before calling executeBlock
- genesisInfo, err = serviceV1Alpha1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{})
- require.Nil(t, err, "GetGenesisInfo failed")
- require.NotNil(t, genesisInfo, "GenesisInfo is nil")
-
- commitmentStateBeforeExecuteBlock, err = serviceV1Alpha1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{})
- require.Nil(t, err, "GetCommitmentState failed")
- require.NotNil(t, commitmentStateBeforeExecuteBlock, "CommitmentState is nil")
- }
-
- // create the txs to send
- // create 5 txs
- txs := []*types.Transaction{}
- marshalledTxs := []*sequencerblockv1.RollupData{}
- for i := 0; i < 5; i++ {
- unsignedTx := types.NewTransaction(uint64(i), testToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil)
- tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), testKey)
- require.Nil(t, err, "Failed to sign tx")
- txs = append(txs, tx)
-
- marshalledTx, err := tx.MarshalBinary()
- require.Nil(t, err, "Failed to marshal tx")
- marshalledTxs = append(marshalledTxs, &sequencerblockv1.RollupData{
- Value: &sequencerblockv1.RollupData_SequencedData{SequencedData: marshalledTx},
- })
- }
-
- // create deposit tx if depositTxAmount is non zero
- if tt.depositTxAmount.Cmp(big.NewInt(0)) != 0 {
- depositAmount := bigIntToProtoU128(tt.depositTxAmount)
- bridgeAddress := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].BridgeAddress
- bridgeAssetDenom := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].AssetDenom
-
- // create new chain destination address for better testing
- chainDestinationAddressPrivKey, err := crypto.GenerateKey()
- require.Nil(t, err, "Failed to generate chain destination address")
-
- chainDestinationAddress := crypto.PubkeyToAddress(chainDestinationAddressPrivKey.PublicKey)
-
- depositTx := &sequencerblockv1.RollupData{Value: &sequencerblockv1.RollupData_Deposit{Deposit: &sequencerblockv1.Deposit{
- BridgeAddress: &primitivev1.Address{
- Bech32M: bridgeAddress,
- },
- Asset: bridgeAssetDenom,
- Amount: depositAmount,
- RollupId: genesisInfo.RollupId,
- DestinationChainAddress: chainDestinationAddress.String(),
- SourceTransactionId: &primitivev1.TransactionId{
- Inner: "test_tx_hash",
- },
- SourceActionIndex: 0,
- }}}
-
- marshalledTxs = append(marshalledTxs, depositTx)
- }
-
- optimisticHeadCh := make(chan core.ChainOptimisticHeadEvent, 1)
- optimsticHeadSub := ethservice.BlockChain().SubscribeChainOptimisticHeadEvent(optimisticHeadCh)
- defer optimsticHeadSub.Unsubscribe()
-
- baseBlockReq := &optimsticPb.BaseBlock{
- Timestamp: &timestamppb.Timestamp{
- Seconds: int64(tt.timestamp),
- },
- Transactions: marshalledTxs,
- SequencerBlockHash: []byte("test_hash"),
- }
-
- res, err := serviceV1Alpha1.ExecuteOptimisticBlock(context.Background(), baseBlockReq)
- if tt.expectedReturnCode > 0 {
- require.NotNil(t, err, "ExecuteOptimisticBlock should return an error")
- require.Equal(t, tt.expectedReturnCode, status.Code(err), "ExecuteOptimisticBlock failed")
- } else {
- require.Nil(t, err, "ExecuteOptimisticBlock failed")
- }
- if err == nil {
- require.NotNil(t, res, "ExecuteOptimisticBlock response is nil")
-
- astriaOrdered := ethservice.TxPool().AstriaOrdered()
- require.Equal(t, 0, astriaOrdered.Len(), "AstriaOrdered should be empty")
-
- // check if commitment state is not updated
- commitmentStateAfterExecuteBlock, err := serviceV1Alpha1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{})
- require.Nil(t, err, "GetCommitmentState failed")
-
- require.Exactly(t, commitmentStateBeforeExecuteBlock, commitmentStateAfterExecuteBlock, "Commitment state should not be updated")
-
- // check if the optimistic block is set
- optimisticBlock := ethservice.BlockChain().CurrentOptimisticBlock()
- require.NotNil(t, optimisticBlock, "Optimistic block is not set")
-
- // check if the optimistic block is correct
- require.Equal(t, common.BytesToHash(res.Hash), optimisticBlock.Hash(), "Optimistic block hashes do not match")
- require.Equal(t, common.BytesToHash(res.ParentBlockHash), optimisticBlock.ParentHash, "Optimistic block parent hashes do not match")
- require.Equal(t, uint64(res.Number), optimisticBlock.Number.Uint64(), "Optimistic block numbers do not match")
-
- // check if optimistic block is inserted into chain
- block := ethservice.BlockChain().GetBlockByHash(optimisticBlock.Hash())
- require.NotNil(t, block, "Optimistic block not found in blockchain")
- require.Equal(t, uint64(res.Number), block.NumberU64(), "Block number is not correct")
-
- // timeout for optimistic head event
- select {
- case blockEvent := <-optimisticHeadCh:
- require.NotNil(t, blockEvent, "Optimistic head event not received")
- require.Equal(t, block.Hash(), blockEvent.Block.Hash(), "Optimistic head event block hash is not correct")
- require.Equal(t, block.NumberU64(), blockEvent.Block.NumberU64(), "Optimistic head event block number is not correct")
- case <-time.After(2 * time.Second):
- require.FailNow(t, "Optimistic head event not received")
- }
- }
- })
- }
-}
-
-func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) {
- ethservice, _ := setupExecutionService(t, 10)
+func TestExecutionServiceServerV1_ExecuteBlock(t *testing.T) {
+ ethservice, _, _, _ := shared.SetupSharedService(t, 10)
tests := []struct {
description string
@@ -417,18 +248,19 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) {
for _, tt := range tests {
t.Run(tt.description, func(t *testing.T) {
// reset the blockchain with each test
- ethservice, serviceV1Alpha1 := setupExecutionService(t, 10)
+ ethservice, sharedServiceContainer, _, _ := shared.SetupSharedService(t, 10)
+ serviceV1 := SetupExecutionService(t, sharedServiceContainer)
var err error // adding this to prevent shadowing of genesisInfo in the below if branch
var genesisInfo *astriaPb.GenesisInfo
var commitmentStateBeforeExecuteBlock *astriaPb.CommitmentState
if tt.callGenesisInfoAndGetCommitmentState {
// call getGenesisInfo and getCommitmentState before calling executeBlock
- genesisInfo, err = serviceV1Alpha1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{})
+ genesisInfo, err = serviceV1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{})
require.Nil(t, err, "GetGenesisInfo failed")
require.NotNil(t, genesisInfo, "GenesisInfo is nil")
- commitmentStateBeforeExecuteBlock, err = serviceV1Alpha1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{})
+ commitmentStateBeforeExecuteBlock, err = serviceV1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{})
require.Nil(t, err, "GetCommitmentState failed")
require.NotNil(t, commitmentStateBeforeExecuteBlock, "CommitmentState is nil")
}
@@ -438,8 +270,8 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) {
txs := []*types.Transaction{}
marshalledTxs := []*sequencerblockv1.RollupData{}
for i := 0; i < 5; i++ {
- unsignedTx := types.NewTransaction(uint64(i), testToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil)
- tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), testKey)
+ unsignedTx := types.NewTransaction(uint64(i), shared.TestToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil)
+ tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), shared.TestKey)
require.Nil(t, err, "Failed to sign tx")
txs = append(txs, tx)
@@ -452,7 +284,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) {
// create deposit tx if depositTxAmount is non zero
if tt.depositTxAmount.Cmp(big.NewInt(0)) != 0 {
- depositAmount := bigIntToProtoU128(tt.depositTxAmount)
+ depositAmount := shared.BigIntToProtoU128(tt.depositTxAmount)
bridgeAddress := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].BridgeAddress
bridgeAssetDenom := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].AssetDenom
@@ -487,7 +319,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) {
Transactions: marshalledTxs,
}
- executeBlockRes, err := serviceV1Alpha1.ExecuteBlock(context.Background(), executeBlockReq)
+ executeBlockRes, err := serviceV1.ExecuteBlock(context.Background(), executeBlockReq)
if tt.expectedReturnCode > 0 {
require.NotNil(t, err, "ExecuteBlock should return an error")
require.Equal(t, tt.expectedReturnCode, status.Code(err), "ExecuteBlock failed")
@@ -499,7 +331,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) {
require.Equal(t, 0, astriaOrdered.Len(), "AstriaOrdered should be empty")
// check if commitment state is not updated
- commitmentStateAfterExecuteBlock, err := serviceV1Alpha1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{})
+ commitmentStateAfterExecuteBlock, err := serviceV1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{})
require.Nil(t, err, "GetCommitmentState failed")
require.Exactly(t, commitmentStateBeforeExecuteBlock, commitmentStateAfterExecuteBlock, "Commitment state should not be updated")
@@ -509,16 +341,17 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) {
}
}
-func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitment(t *testing.T) {
- ethservice, serviceV1Alpha1 := setupExecutionService(t, 10)
+func TestExecutionServiceServerV1_ExecuteBlockAndUpdateCommitment(t *testing.T) {
+ ethservice, sharedServiceContainer, _, _ := shared.SetupSharedService(t, 10)
+ serviceV1 := SetupExecutionService(t, sharedServiceContainer)
// call genesis info
- genesisInfo, err := serviceV1Alpha1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{})
+ genesisInfo, err := serviceV1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{})
require.Nil(t, err, "GetGenesisInfo failed")
require.NotNil(t, genesisInfo, "GenesisInfo is nil")
// call get commitment state
- commitmentState, err := serviceV1Alpha1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{})
+ commitmentState, err := serviceV1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{})
require.Nil(t, err, "GetCommitmentState failed")
require.NotNil(t, commitmentState, "CommitmentState is nil")
@@ -530,8 +363,8 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitment(t *testi
txs := []*types.Transaction{}
marshalledTxs := []*sequencerblockv1.RollupData{}
for i := 0; i < 5; i++ {
- unsignedTx := types.NewTransaction(uint64(i), testToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil)
- tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), testKey)
+ unsignedTx := types.NewTransaction(uint64(i), shared.TestToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil)
+ tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), shared.TestKey)
require.Nil(t, err, "Failed to sign tx")
txs = append(txs, tx)
@@ -543,7 +376,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitment(t *testi
}
amountToDeposit := big.NewInt(1000000000000000000)
- depositAmount := bigIntToProtoU128(amountToDeposit)
+ depositAmount := shared.BigIntToProtoU128(amountToDeposit)
bridgeAddress := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].BridgeAddress
bridgeAssetDenom := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].AssetDenom
@@ -583,7 +416,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitment(t *testi
Transactions: marshalledTxs,
}
- executeBlockRes, err := serviceV1Alpha1.ExecuteBlock(context.Background(), executeBlockReq)
+ executeBlockRes, err := serviceV1.ExecuteBlock(context.Background(), executeBlockReq)
require.Nil(t, err, "ExecuteBlock failed")
require.NotNil(t, executeBlockRes, "ExecuteBlock response is nil")
@@ -611,7 +444,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitment(t *testi
},
}
- updateCommitmentStateRes, err := serviceV1Alpha1.UpdateCommitmentState(context.Background(), updateCommitmentStateReq)
+ updateCommitmentStateRes, err := serviceV1.UpdateCommitmentState(context.Background(), updateCommitmentStateReq)
require.Nil(t, err, "UpdateCommitmentState failed")
require.NotNil(t, updateCommitmentStateRes, "UpdateCommitmentState response should not be nil")
require.Equal(t, updateCommitmentStateRes, updateCommitmentStateReq.CommitmentState, "CommitmentState response should match request")
@@ -645,16 +478,17 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitment(t *testi
}
// Check that invalid transactions are not added into a block and are removed from the mempool
-func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitmentWithInvalidTransactions(t *testing.T) {
- ethservice, serviceV1Alpha1 := setupExecutionService(t, 10)
+func TestExecutionServiceServerV1_ExecuteBlockAndUpdateCommitmentWithInvalidTransactions(t *testing.T) {
+ ethservice, sharedServiceContainer, _, _ := shared.SetupSharedService(t, 10)
+ serviceV1 := SetupExecutionService(t, sharedServiceContainer)
// call genesis info
- genesisInfo, err := serviceV1Alpha1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{})
+ genesisInfo, err := serviceV1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{})
require.Nil(t, err, "GetGenesisInfo failed")
require.NotNil(t, genesisInfo, "GenesisInfo is nil")
// call get commitment state
- commitmentState, err := serviceV1Alpha1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{})
+ commitmentState, err := serviceV1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{})
require.Nil(t, err, "GetCommitmentState failed")
require.NotNil(t, commitmentState, "CommitmentState is nil")
@@ -669,14 +503,14 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitmentWithInval
stateDb, err := ethservice.BlockChain().StateAt(previousBlock.Root())
require.Nil(t, err, "Failed to get state db")
- latestNonce := stateDb.GetNonce(testAddr)
+ latestNonce := stateDb.GetNonce(shared.TestAddr)
// create 5 txs
txs := []*types.Transaction{}
marshalledTxs := []*sequencerblockv1.RollupData{}
for i := 0; i < 5; i++ {
- unsignedTx := types.NewTransaction(latestNonce+uint64(i), testToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil)
- tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), testKey)
+ unsignedTx := types.NewTransaction(latestNonce+uint64(i), shared.TestToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil)
+ tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), shared.TestKey)
require.Nil(t, err, "Failed to sign tx")
txs = append(txs, tx)
@@ -687,9 +521,9 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitmentWithInval
})
}
- // add a tx with lesser gas than the base gas
- unsignedTx := types.NewTransaction(latestNonce+uint64(5), testToAddress, big.NewInt(1), ethservice.BlockChain().GasLimit(), big.NewInt(params.InitialBaseFee*2), nil)
- tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), testKey)
+ // add a tx which takes up more gas than the block gas limit
+ unsignedTx := types.NewTransaction(latestNonce+uint64(5), shared.TestToAddress, big.NewInt(1), ethservice.BlockChain().GasLimit(), big.NewInt(params.InitialBaseFee*2), nil)
+ tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), shared.TestKey)
require.Nil(t, err, "Failed to sign tx")
txs = append(txs, tx)
@@ -707,7 +541,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitmentWithInval
Transactions: marshalledTxs,
}
- executeBlockRes, err := serviceV1Alpha1.ExecuteBlock(context.Background(), executeBlockReq)
+ executeBlockRes, err := serviceV1.ExecuteBlock(context.Background(), executeBlockReq)
require.Nil(t, err, "ExecuteBlock failed")
require.NotNil(t, executeBlockRes, "ExecuteBlock response is nil")
@@ -735,7 +569,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitmentWithInval
},
}
- updateCommitmentStateRes, err := serviceV1Alpha1.UpdateCommitmentState(context.Background(), updateCommitmentStateReq)
+ updateCommitmentStateRes, err := serviceV1.UpdateCommitmentState(context.Background(), updateCommitmentStateReq)
require.Nil(t, err, "UpdateCommitmentState failed")
require.NotNil(t, updateCommitmentStateRes, "UpdateCommitmentState response should not be nil")
require.Equal(t, updateCommitmentStateRes, updateCommitmentStateReq.CommitmentState, "CommitmentState response should match request")
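
The uint128 conversion helpers removed in this diff (protoU128ToBigInt in server.go and bigIntToProtoU128 in server_test.go) are now consumed through grpc/shared — the tests call shared.BigIntToProtoU128. For reference, the conversions are small; the sketch below is equivalent to the removed code, with the exported names in package shared assumed rather than confirmed here.

package shared

import (
	"math/big"

	primitivev1 "buf.build/gen/go/astria/primitives/protocolbuffers/go/astria/primitive/v1"
)

// ProtoU128ToBigInt (sketch) combines the two 64-bit limbs: hi<<64 + lo.
func ProtoU128ToBigInt(u128 *primitivev1.Uint128) *big.Int {
	lo := new(big.Int).SetUint64(u128.Lo)
	hi := new(big.Int).SetUint64(u128.Hi)
	hi.Lsh(hi, 64)
	return lo.Add(lo, hi)
}

// BigIntToProtoU128 (sketch) is the inverse: Lo holds the low 64 bits, Hi the next 64.
func BigIntToProtoU128(i *big.Int) *primitivev1.Uint128 {
	lo := i.Uint64()
	hi := new(big.Int).Rsh(i, 64).Uint64()
	return &primitivev1.Uint128{Lo: lo, Hi: hi}
}
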
diff --git a/grpc/execution/test_setup.go b/grpc/execution/test_setup.go
new file mode 100644
index 000000000..fafa48ed7
--- /dev/null
+++ b/grpc/execution/test_setup.go
@@ -0,0 +1,12 @@
+package execution
+
+import (
+ "github.com/ethereum/go-ethereum/grpc/shared"
+ "testing"
+)
+
+func SetupExecutionService(t *testing.T, sharedService *shared.SharedServiceContainer) *ExecutionServiceServerV1 {
+ t.Helper()
+
+ return NewExecutionServiceServerV1(sharedService)
+}
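
A minimal illustration of how the new helper composes with shared.SetupSharedService in the rewritten tests above. This is a hypothetical test, placed alongside test_setup.go in package execution and using only signatures that appear in this diff.

func TestExecutionServiceV1_Wiring(t *testing.T) {
	// SetupSharedService builds an ethservice plus the shared container; the
	// remaining return values are not needed for this wiring check.
	_, sharedServiceContainer, _, _ := shared.SetupSharedService(t, 10)

	serviceV1 := SetupExecutionService(t, sharedServiceContainer)
	if serviceV1 == nil {
		t.Fatal("expected a non-nil ExecutionServiceServerV1")
	}
}
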
diff --git a/grpc/execution/validation.go b/grpc/execution/validation.go
index 86dee556d..5a5cc9957 100644
--- a/grpc/execution/validation.go
+++ b/grpc/execution/validation.go
@@ -1,113 +1,10 @@
package execution
import (
- "crypto/sha256"
- "fmt"
- "math/big"
-
- optimsticPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/bundle/v1alpha1"
astriaPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/execution/v1"
- sequencerblockv1 "buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go/astria/sequencerblock/v1"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/contracts"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/log"
- "github.com/ethereum/go-ethereum/params"
+ "fmt"
)
-// `validateAndUnmarshalSequencerTx` validates and unmarshals the given rollup sequencer transaction.
-// If the sequencer transaction is a deposit tx, we ensure that the asset ID is allowed and the bridge address is known.
-// If the sequencer transaction is not a deposit tx, we unmarshal the sequenced data into an Ethereum transaction. We ensure that the
-// tx is not a blob tx or a deposit tx.
-func validateAndUnmarshalSequencerTx(
- height uint64,
- tx *sequencerblockv1.RollupData,
- bridgeAddresses map[string]*params.AstriaBridgeAddressConfig,
- bridgeAllowedAssets map[string]struct{},
-) (*types.Transaction, error) {
- if deposit := tx.GetDeposit(); deposit != nil {
- bridgeAddress := deposit.BridgeAddress.GetBech32M()
- bac, ok := bridgeAddresses[bridgeAddress]
- if !ok {
- return nil, fmt.Errorf("unknown bridge address: %s", bridgeAddress)
- }
-
- if height < uint64(bac.StartHeight) {
- return nil, fmt.Errorf("bridging asset %s from bridge %s not allowed before height %d", bac.AssetDenom, bridgeAddress, bac.StartHeight)
- }
-
- if _, ok := bridgeAllowedAssets[deposit.Asset]; !ok {
- return nil, fmt.Errorf("disallowed asset %s in deposit tx", deposit.Asset)
- }
-
- if deposit.Asset != bac.AssetDenom {
- return nil, fmt.Errorf("asset %s does not match bridge address %s asset", deposit.Asset, bridgeAddress)
- }
-
- recipient := common.HexToAddress(deposit.DestinationChainAddress)
- amount := bac.ScaledDepositAmount(protoU128ToBigInt(deposit.Amount))
-
- if bac.Erc20Asset != nil {
- log.Debug("creating deposit tx to mint ERC20 asset", "token", bac.AssetDenom, "erc20Address", bac.Erc20Asset.ContractAddress)
- abi, err := contracts.AstriaBridgeableERC20MetaData.GetAbi()
- if err != nil {
- // this should never happen, as the abi is hardcoded in the contract bindings
- return nil, fmt.Errorf("failed to get abi for erc20 contract for asset %s: %w", bac.AssetDenom, err)
- }
-
- // pack arguments for calling the `mint` function on the ERC20 contract
- args := []interface{}{recipient, amount}
- calldata, err := abi.Pack("mint", args...)
- if err != nil {
- return nil, err
- }
-
- txdata := types.DepositTx{
- From: bac.SenderAddress,
- Value: new(big.Int), // don't need to set this, as we aren't minting the native asset
- // mints cost ~14k gas, however this can vary based on existing storage, so we add a little extra as buffer.
- //
- // the fees are spent from the "bridge account" which is not actually a real account, but is instead some
- // address defined by consensus, so the gas cost is not actually deducted from any account.
- Gas: 64000,
- To: &bac.Erc20Asset.ContractAddress,
- Data: calldata,
- SourceTransactionId: *deposit.SourceTransactionId,
- SourceTransactionIndex: deposit.SourceActionIndex,
- }
-
- tx := types.NewTx(&txdata)
- return tx, nil
- }
-
- txdata := types.DepositTx{
- From: bac.SenderAddress,
- To: &recipient,
- Value: amount,
- Gas: 0,
- SourceTransactionId: *deposit.SourceTransactionId,
- SourceTransactionIndex: deposit.SourceActionIndex,
- }
- return types.NewTx(&txdata), nil
- } else {
- ethTx := new(types.Transaction)
- err := ethTx.UnmarshalBinary(tx.GetSequencedData())
- if err != nil {
- return nil, fmt.Errorf("failed to unmarshal sequenced data into transaction: %w. tx hash: %s", err, sha256.Sum256(tx.GetSequencedData()))
- }
-
- if ethTx.Type() == types.DepositTxType {
- return nil, fmt.Errorf("deposit tx not allowed in sequenced data. tx hash: %s", sha256.Sum256(tx.GetSequencedData()))
- }
-
- if ethTx.Type() == types.BlobTxType {
- return nil, fmt.Errorf("blob tx not allowed in sequenced data. tx hash: %s", sha256.Sum256(tx.GetSequencedData()))
- }
-
- return ethTx, nil
- }
-}
-
// `validateStaticExecuteBlockRequest` validates the given execute block request without regard
// to the current state of the system. This is useful for validating the request before any
// state changes or reads are made as a basic guard.
@@ -122,17 +19,6 @@ func validateStaticExecuteBlockRequest(req *astriaPb.ExecuteBlockRequest) error
return nil
}
-func validateStaticExecuteOptimisticBlockRequest(req *optimsticPb.BaseBlock) error {
- if req.Timestamp == nil {
- return fmt.Errorf("Timestamp cannot be nil")
- }
- if len(req.SequencerBlockHash) == 0 {
- return fmt.Errorf("SequencerBlockHash cannot be empty")
- }
-
- return nil
-}
-
// `validateStaticCommitment` validates the given commitment without regard to the current state of the system.
func validateStaticCommitmentState(commitmentState *astriaPb.CommitmentState) error {
if commitmentState == nil {
diff --git a/grpc/execution/validation_test.go b/grpc/execution/validation_test.go
deleted file mode 100644
index 9c2b149d6..000000000
--- a/grpc/execution/validation_test.go
+++ /dev/null
@@ -1,207 +0,0 @@
-package execution
-
-import (
- "math/big"
- "testing"
-
- primitivev1 "buf.build/gen/go/astria/primitives/protocolbuffers/go/astria/primitive/v1"
- sequencerblockv1 "buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go/astria/sequencerblock/v1"
- "github.com/btcsuite/btcd/btcutil/bech32"
- "github.com/ethereum/go-ethereum/common"
- "github.com/ethereum/go-ethereum/core/types"
- "github.com/ethereum/go-ethereum/crypto"
- "github.com/ethereum/go-ethereum/params"
- "github.com/holiman/uint256"
- "github.com/stretchr/testify/require"
-)
-
-func testBlobTx() *types.Transaction {
- return types.NewTx(&types.BlobTx{
- Nonce: 1,
- To: testAddr,
- Value: uint256.NewInt(1000),
- Gas: 1000,
- Data: []byte("data"),
- })
-}
-
-func testDepositTx() *types.Transaction {
- return types.NewTx(&types.DepositTx{
- From: testAddr,
- Value: big.NewInt(1000),
- Gas: 1000,
- })
-}
-
-func generateBech32MAddress() string {
- addressKey, err := crypto.GenerateKey()
- if err != nil {
- panic(err)
- }
- bridgeAddress := crypto.PubkeyToAddress(addressKey.PublicKey)
- bridgeAddressBytes, err := bech32.ConvertBits(bridgeAddress.Bytes(), 8, 5, false)
- if err != nil {
- panic(err)
- }
-
- bech32m, err := bech32.EncodeM("astria", bridgeAddressBytes)
- if err != nil {
- panic(err)
- }
-
- return bech32m
-}
-
-func TestSequenceTxValidation(t *testing.T) {
- ethservice, serviceV1Alpha1 := setupExecutionService(t, 10)
-
- blobTx, err := testBlobTx().MarshalBinary()
- require.Nil(t, err, "failed to marshal random blob tx: %v", err)
-
- depositTx, err := testDepositTx().MarshalBinary()
- require.Nil(t, err, "failed to marshal random deposit tx: %v", err)
-
- unsignedTx := types.NewTransaction(uint64(0), common.HexToAddress("0x9a9070028361F7AAbeB3f2F2Dc07F82C4a98A02a"), big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil)
- tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), testKey)
- require.Nil(t, err, "failed to sign tx: %v", err)
-
- validMarshalledTx, err := tx.MarshalBinary()
- require.Nil(t, err, "failed to marshal valid tx: %v", err)
-
- chainDestinationKey, err := crypto.GenerateKey()
- require.Nil(t, err, "failed to generate chain destination key: %v", err)
- chainDestinationAddress := crypto.PubkeyToAddress(chainDestinationKey.PublicKey)
-
- bridgeAssetDenom := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].AssetDenom
- invalidBridgeAssetDenom := "invalid-asset-denom"
-
- invalidHeightBridgeAssetDenom := "invalid-height-asset-denom"
- invalidHeightBridgeAddressBech32m := generateBech32MAddress()
-	serviceV1Alpha1.bridgeAddresses[invalidHeightBridgeAddressBech32m] = &params.AstriaBridgeAddressConfig{
- AssetDenom: invalidHeightBridgeAssetDenom,
- StartHeight: 100,
- }
-
- bridgeAddress := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].BridgeAddress
-
- tests := []struct {
- description string
- sequencerTx *sequencerblockv1.RollupData
- // just check if error contains the string since error contains other details
- wantErr string
- }{
- {
- description: "unmarshallable sequencer tx",
- sequencerTx: &sequencerblockv1.RollupData{
- Value: &sequencerblockv1.RollupData_SequencedData{
- SequencedData: []byte("unmarshallable tx"),
- },
- },
- wantErr: "failed to unmarshal sequenced data into transaction",
- },
- {
- description: "blob type sequence tx",
- sequencerTx: &sequencerblockv1.RollupData{
- Value: &sequencerblockv1.RollupData_SequencedData{
- SequencedData: blobTx,
- },
- },
- wantErr: "blob tx not allowed in sequenced data",
- },
- {
- description: "deposit type sequence tx",
- sequencerTx: &sequencerblockv1.RollupData{
- Value: &sequencerblockv1.RollupData_SequencedData{
- SequencedData: depositTx,
- },
- },
- wantErr: "deposit tx not allowed in sequenced data",
- },
- {
- description: "deposit tx with an unknown bridge address",
- sequencerTx: &sequencerblockv1.RollupData{Value: &sequencerblockv1.RollupData_Deposit{Deposit: &sequencerblockv1.Deposit{
- BridgeAddress: &primitivev1.Address{
- Bech32M: generateBech32MAddress(),
- },
- Asset: bridgeAssetDenom,
- Amount: bigIntToProtoU128(big.NewInt(1000000000000000000)),
- RollupId: &primitivev1.RollupId{Inner: make([]byte, 0)},
- DestinationChainAddress: chainDestinationAddress.String(),
- SourceTransactionId: &primitivev1.TransactionId{
- Inner: "test_tx_hash",
- },
- SourceActionIndex: 0,
- }}},
- wantErr: "unknown bridge address",
- },
- {
- description: "deposit tx with a disallowed asset id",
- sequencerTx: &sequencerblockv1.RollupData{Value: &sequencerblockv1.RollupData_Deposit{Deposit: &sequencerblockv1.Deposit{
- BridgeAddress: &primitivev1.Address{
- Bech32M: bridgeAddress,
- },
- Asset: invalidBridgeAssetDenom,
- Amount: bigIntToProtoU128(big.NewInt(1000000000000000000)),
- RollupId: &primitivev1.RollupId{Inner: make([]byte, 0)},
- DestinationChainAddress: chainDestinationAddress.String(),
- SourceTransactionId: &primitivev1.TransactionId{
- Inner: "test_tx_hash",
- },
- SourceActionIndex: 0,
- }}},
- wantErr: "disallowed asset",
- },
- {
- description: "deposit tx with a height and asset below the bridge start height",
- sequencerTx: &sequencerblockv1.RollupData{Value: &sequencerblockv1.RollupData_Deposit{Deposit: &sequencerblockv1.Deposit{
- BridgeAddress: &primitivev1.Address{
- Bech32M: invalidHeightBridgeAddressBech32m,
- },
- Asset: invalidHeightBridgeAssetDenom,
- Amount: bigIntToProtoU128(big.NewInt(1000000000000000000)),
- RollupId: &primitivev1.RollupId{Inner: make([]byte, 0)},
- DestinationChainAddress: chainDestinationAddress.String(),
- SourceTransactionId: &primitivev1.TransactionId{
- Inner: "test_tx_hash",
- },
- SourceActionIndex: 0,
- }}},
- wantErr: "not allowed before height",
- },
- {
- description: "valid deposit tx",
- sequencerTx: &sequencerblockv1.RollupData{Value: &sequencerblockv1.RollupData_Deposit{Deposit: &sequencerblockv1.Deposit{
- BridgeAddress: &primitivev1.Address{
- Bech32M: bridgeAddress,
- },
- Asset: bridgeAssetDenom,
- Amount: bigIntToProtoU128(big.NewInt(1000000000000000000)),
- RollupId: &primitivev1.RollupId{Inner: make([]byte, 0)},
- DestinationChainAddress: chainDestinationAddress.String(),
- SourceTransactionId: &primitivev1.TransactionId{
- Inner: "test_tx_hash",
- },
- SourceActionIndex: 0,
- }}},
- wantErr: "",
- },
- {
- description: "valid sequencer tx",
- sequencerTx: &sequencerblockv1.RollupData{
- Value: &sequencerblockv1.RollupData_SequencedData{SequencedData: validMarshalledTx},
- },
- wantErr: "",
- },
- }
-
- for _, test := range tests {
- t.Run(test.description, func(t *testing.T) {
- _, err := validateAndUnmarshalSequencerTx(2, test.sequencerTx, serviceV1Alpha1.bridgeAddresses, serviceV1Alpha1.bridgeAllowedAssets)
- if test.wantErr == "" && err == nil {
- return
- }
- require.False(t, test.wantErr == "" && err != nil, "expected error, got nil")
- require.Contains(t, err.Error(), test.wantErr)
- })
- }
-}
diff --git a/grpc/optimistic/mock_grpc_stream.go b/grpc/optimistic/mock_grpc_stream.go
new file mode 100644
index 000000000..1696ff8be
--- /dev/null
+++ b/grpc/optimistic/mock_grpc_stream.go
@@ -0,0 +1,91 @@
+package optimistic
+
+import (
+ "context"
+ "google.golang.org/grpc/metadata"
+ "io"
+ "time"
+)
+
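+// MockBidirectionalStreaming is a test double for a bidirectional gRPC stream.
+// Recv replays the canned requestStream (with an artificial delay) and returns
+// io.EOF once it is exhausted; Send records every response in accumulatedResponses
+// so tests can assert on what the server streamed back. The metadata and raw
+// message methods are intentionally left unimplemented.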
+type MockBidirectionalStreaming[K any, V any] struct {
+ requestStream []*K
+ accumulatedResponses []*V
+ requestCounter uint64
+}
+
+func (ms *MockBidirectionalStreaming[K, V]) Recv() (*K, error) {
+ // add a delay to make it look like an async stream
+ time.Sleep(2 * time.Second)
+	if ms.requestCounter >= uint64(len(ms.requestStream)) {
+ // end the stream after all the packets have been sent
+ return nil, io.EOF
+ }
+
+ req := ms.requestStream[ms.requestCounter]
+ ms.requestCounter += 1
+
+ return req, nil
+}
+
+func (ms *MockBidirectionalStreaming[K, V]) Send(res *V) error {
+ ms.accumulatedResponses = append(ms.accumulatedResponses, res)
+ return nil
+}
+
+func (ms *MockBidirectionalStreaming[K, V]) SetHeader(md metadata.MD) error {
+ panic("implement me")
+}
+
+func (ms *MockBidirectionalStreaming[K, V]) SendHeader(md metadata.MD) error {
+ panic("implement me")
+}
+
+func (ms *MockBidirectionalStreaming[K, V]) SetTrailer(md metadata.MD) {
+ panic("implement me")
+}
+
+func (ms *MockBidirectionalStreaming[K, V]) Context() context.Context {
+ return context.Background()
+}
+
+func (ms *MockBidirectionalStreaming[K, V]) SendMsg(m any) error {
+ panic("implement me")
+}
+
+func (ms *MockBidirectionalStreaming[K, V]) RecvMsg(m any) error {
+ panic("implement me")
+}
+
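+// MockServerSideStreaming is a test double for a server-side gRPC stream:
+// Send appends every response to sentResponses so tests can inspect what was streamed.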
+type MockServerSideStreaming[K any] struct {
+ sentResponses []*K
+}
+
+func (ms *MockServerSideStreaming[K]) SendMsg(m any) error {
+ //TODO implement me
+ panic("implement me")
+}
+
+func (ms *MockServerSideStreaming[K]) Send(res *K) error {
+ ms.sentResponses = append(ms.sentResponses, res)
+ return nil
+}
+
+func (ms *MockServerSideStreaming[K]) SetHeader(md metadata.MD) error {
+ panic("implement me")
+}
+
+func (ms *MockServerSideStreaming[K]) SendHeader(md metadata.MD) error {
+ panic("implement me")
+}
+
+func (ms *MockServerSideStreaming[K]) SetTrailer(md metadata.MD) {
+ panic("implement me")
+}
+
+func (ms *MockServerSideStreaming[K]) Context() context.Context {
+ return context.Background()
+}
+
+func (ms *MockServerSideStreaming[K]) RecvMsg(m any) error {
+ panic("implement me")
+}
diff --git a/grpc/optimistic/server.go b/grpc/optimistic/server.go
new file mode 100644
index 000000000..52cf8116c
--- /dev/null
+++ b/grpc/optimistic/server.go
@@ -0,0 +1,303 @@
+package optimistic
+
+import (
+ optimisticGrpc "buf.build/gen/go/astria/execution-apis/grpc/go/astria/auction/v1alpha1/auctionv1alpha1grpc"
+ optimsticPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/auction/v1alpha1"
+ astriaPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/execution/v1"
+ "context"
+ "errors"
+ "github.com/ethereum/go-ethereum/beacon/engine"
+ "github.com/ethereum/go-ethereum/common"
+ cmath "github.com/ethereum/go-ethereum/common/math"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/eth"
+ "github.com/ethereum/go-ethereum/grpc/shared"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
+ "github.com/ethereum/go-ethereum/miner"
+ "github.com/ethereum/go-ethereum/params"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/types/known/timestamppb"
+ "io"
+ "math/big"
+ "sync"
+ "sync/atomic"
+ "time"
+)
+
+type OptimisticServiceV1Alpha1 struct {
+ optimisticGrpc.UnimplementedOptimisticExecutionServiceServer
+ optimisticGrpc.UnimplementedAuctionServiceServer
+
+ sharedServiceContainer *shared.SharedServiceContainer
+
+ currentOptimisticSequencerBlock atomic.Pointer[[]byte]
+}
+
+var (
+ executeOptimisticBlockRequestCount = metrics.GetOrRegisterCounter("astria/optimistic/execute_optimistic_block_requests", nil)
+ executeOptimisticBlockSuccessCount = metrics.GetOrRegisterCounter("astria/optimistic/execute_optimistic_block_success", nil)
+ optimisticBlockHeight = metrics.GetOrRegisterGauge("astria/execution/optimistic_block_height", nil)
+ txsStreamedCount = metrics.GetOrRegisterCounter("astria/optimistic/txs_streamed", nil)
+
+ executionOptimisticBlockTimer = metrics.GetOrRegisterTimer("astria/optimistic/execute_optimistic_block_time", nil)
+)
+
+func NewOptimisticServiceV1Alpha(sharedServiceContainer *shared.SharedServiceContainer) *OptimisticServiceV1Alpha1 {
+ optimisticService := &OptimisticServiceV1Alpha1{
+ sharedServiceContainer: sharedServiceContainer,
+ }
+
+ optimisticService.currentOptimisticSequencerBlock.Store(&[]byte{})
+
+ return optimisticService
+}
+
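+// GetBidStream subscribes to the mempool and streams a Bid to the auctioneer for every
+// transaction that enters it. Each bid carries the marshalled transaction, its effective
+// priority fee, and the rollup/sequencer parent hashes of the current optimistic block.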
+func (o *OptimisticServiceV1Alpha1) GetBidStream(_ *optimsticPb.GetBidStreamRequest, stream optimisticGrpc.AuctionService_GetBidStreamServer) error {
+ log.Debug("GetBidStream called")
+
+ pendingTxEventCh := make(chan core.NewTxsEvent)
+ pendingTxEvent := o.Eth().TxPool().SubscribeTransactions(pendingTxEventCh, false)
+ defer pendingTxEvent.Unsubscribe()
+
+ for {
+ select {
+ case pendingTxs := <-pendingTxEventCh:
+			// get the current optimistic block
+			// this is an in-memory read, so speed is not a concern here
+ optimisticBlock := o.Eth().BlockChain().CurrentOptimisticBlock()
+
+ for _, pendingTx := range pendingTxs.Txs {
+ bid := optimsticPb.Bid{}
+
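+				// the bid fee is the transaction's effective tip per gas: min(gas tip cap, gas fee cap - base fee of the optimistic block)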
+ totalCost := big.NewInt(0)
+ effectiveTip := cmath.BigMin(pendingTx.GasTipCap(), new(big.Int).Sub(pendingTx.GasFeeCap(), optimisticBlock.BaseFee))
+ totalCost.Add(totalCost, effectiveTip)
+
+ marshalledTxs := [][]byte{}
+ marshalledTx, err := pendingTx.MarshalBinary()
+ if err != nil {
+ return status.Errorf(codes.Internal, shared.WrapError(err, "error marshalling tx").Error())
+ }
+ marshalledTxs = append(marshalledTxs, marshalledTx)
+
+ bid.Fee = totalCost.Uint64()
+ bid.Transactions = marshalledTxs
+ bid.SequencerParentBlockHash = *o.currentOptimisticSequencerBlock.Load()
+ bid.RollupParentBlockHash = optimisticBlock.Hash().Bytes()
+
+ txsStreamedCount.Inc(1)
+ err = stream.Send(&optimsticPb.GetBidStreamResponse{Bid: &bid})
+ if err != nil {
+ log.Error("error sending bid over stream", "err", err)
+ return status.Error(codes.Internal, shared.WrapError(err, "error sending bid over stream").Error())
+ }
+ }
+
+ case err := <-pendingTxEvent.Err():
+ if err != nil {
+ log.Error("error waiting for pending transactions", "err", err)
+ return status.Error(codes.Internal, shared.WrapError(err, "error waiting for pending transactions").Error())
+ } else {
+ // TODO - what is the right error code here?
+ return status.Error(codes.Internal, "tx pool subscription closed")
+ }
+
+ case <-stream.Context().Done():
+ return stream.Context().Err()
+ }
+ }
+}
+
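+// ExecuteOptimisticBlockStream receives BaseBlocks from the auctioneer, executes each one
+// optimistically on top of the current soft block, waits for the mempool to be cleared
+// against the new optimistic head, and then streams the executed block back to the caller.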
+func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlockStream(stream optimisticGrpc.OptimisticExecutionService_ExecuteOptimisticBlockStreamServer) error {
+ log.Debug("ExecuteOptimisticBlockStream called")
+
+ mempoolClearingEventCh := make(chan core.NewMempoolCleared)
+ mempoolClearingEvent := o.Eth().TxPool().SubscribeMempoolClearance(mempoolClearingEventCh)
+ defer mempoolClearingEvent.Unsubscribe()
+
+ for {
+ msg, err := stream.Recv()
+ // stream has been closed
+ if errors.Is(err, io.EOF) {
+ return nil
+ }
+ if err != nil {
+ return err
+ }
+
+ executeOptimisticBlockRequestCount.Inc(1)
+
+ baseBlock := msg.GetBaseBlock()
+
+ // execute the optimistic block and wait for the mempool clearing event
+ optimisticBlock, err := o.ExecuteOptimisticBlock(stream.Context(), baseBlock)
+ if err != nil {
+ return status.Errorf(codes.Internal, shared.WrapError(err, "failed to execute optimistic block").Error())
+ }
+ optimisticBlockHash := common.BytesToHash(optimisticBlock.Hash)
+
+ // listen to the mempool clearing event and send the response back to the auctioneer when the mempool is cleared
+ select {
+ case event := <-mempoolClearingEventCh:
+ if event.NewHead.Hash() != optimisticBlockHash {
+ return status.Error(codes.Internal, "failed to clear mempool after optimistic block execution")
+ }
+ o.currentOptimisticSequencerBlock.Store(&baseBlock.SequencerBlockHash)
+ executeOptimisticBlockSuccessCount.Inc(1)
+			err = stream.Send(&optimsticPb.ExecuteOptimisticBlockStreamResponse{
+				Block:                  optimisticBlock,
+				BaseSequencerBlockHash: baseBlock.SequencerBlockHash,
+			})
+			if err != nil {
+				log.Error("error sending optimistic block over stream", "err", err)
+				return status.Error(codes.Internal, shared.WrapError(err, "error sending optimistic block over stream").Error())
+			}
+ case <-time.After(500 * time.Millisecond):
+ log.Error("timed out waiting for mempool to clear after optimistic block execution")
+ return status.Error(codes.DeadlineExceeded, "timed out waiting for mempool to clear after optimistic block execution")
+ case err := <-mempoolClearingEvent.Err():
+ if err != nil {
+ log.Error("error waiting for mempool clearing event", "err", err)
+ return status.Errorf(codes.Internal, shared.WrapError(err, "error waiting for mempool clearing event").Error())
+ } else {
+ // TODO - what is the right error code here?
+ return status.Error(codes.Internal, "mempool clearance subscription closed")
+ }
+ case <-stream.Context().Done():
+ return stream.Context().Err()
+ }
+ }
+}
+
+func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlock(ctx context.Context, req *optimsticPb.BaseBlock) (*astriaPb.Block, error) {
+	// execute the given base block optimistically on top of the current soft block
+ log.Debug("ExecuteOptimisticBlock called", "timestamp", req.Timestamp, "sequencer_block_hash", req.SequencerBlockHash)
+
+	// Start the timer here to more directly measure the time spent executing
+ executionStart := time.Now()
+ defer executionOptimisticBlockTimer.UpdateSince(executionStart)
+
+ if err := validateStaticExecuteOptimisticBlockRequest(req); err != nil {
+ log.Error("ExecuteOptimisticBlock called with invalid BaseBlock", "err", err)
+ return nil, status.Error(codes.InvalidArgument, shared.WrapError(err, "invalid BaseBlock").Error())
+ }
+
+ if !o.SyncMethodsCalled() {
+ return nil, status.Error(codes.PermissionDenied, "Cannot execute block until GetGenesisInfo && GetCommitmentState methods are called")
+ }
+
+ softBlock := o.Bc().CurrentSafeBlock()
+
+ nextFeeRecipient := o.NextFeeRecipient()
+
+ // the height that this block will be at
+ height := o.Bc().CurrentBlock().Number.Uint64() + 1
+
+ addressPrefix := o.Bc().Config().AstriaSequencerAddressPrefix
+
+ txsToProcess := shared.UnbundleRollupDataTransactions(req.Transactions, height, o.BridgeAddresses(), o.BridgeAllowedAssets(), softBlock.Hash().Bytes(), o.AuctioneerAddress(), addressPrefix)
+
+ // Build a payload to add to the chain
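+	// OverrideTransactions hands the unbundled rollup transactions directly to the payload
+	// builder, and IsOptimisticExecution marks the payload as part of the optimistic flow.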
+ payloadAttributes := &miner.BuildPayloadArgs{
+ Parent: softBlock.Hash(),
+ Timestamp: uint64(req.GetTimestamp().GetSeconds()),
+ Random: common.Hash{},
+ FeeRecipient: nextFeeRecipient,
+ OverrideTransactions: txsToProcess,
+ IsOptimisticExecution: true,
+ }
+ payload, err := o.Eth().Miner().BuildPayload(payloadAttributes)
+ if err != nil {
+ log.Error("failed to build payload", "err", err)
+ return nil, status.Errorf(codes.InvalidArgument, shared.WrapError(err, "failed to build payload").Error())
+ }
+
+ block, err := engine.ExecutableDataToBlock(*payload.Resolve().ExecutionPayload, nil, nil)
+ if err != nil {
+		log.Error("failed to convert executable data to block", "err", err)
+ return nil, status.Error(codes.Internal, shared.WrapError(err, "failed to convert executable data to block").Error())
+ }
+
+	// this will insert the optimistic block into the chain and persist its state without
+ // setting it as the HEAD.
+ err = o.Bc().InsertBlockWithoutSetHead(block)
+ if err != nil {
+ log.Error("failed to insert block to chain", "hash", block.Hash(), "prevHash", block.ParentHash(), "err", err)
+ return nil, status.Error(codes.Internal, shared.WrapError(err, "failed to insert block to chain").Error())
+ }
+
+	// we store a pointer to the optimistic block in the chain so that we can use it
+	// to retrieve the state of the optimistic block.
+	// this method also sends an event which indicates that a new optimistic block has been set;
+	// the mempool clearing logic is triggered when this event is received.
+ o.Bc().SetOptimistic(block)
+
+ res := &astriaPb.Block{
+ Number: uint32(block.NumberU64()),
+ Hash: block.Hash().Bytes(),
+ ParentBlockHash: block.ParentHash().Bytes(),
+		Timestamp: &timestamppb.Timestamp{
+ Seconds: int64(block.Time()),
+ },
+ }
+
+ optimisticBlockHeight.Update(int64(block.NumberU64()))
+
+ log.Info("ExecuteOptimisticBlock completed", "block_num", res.Number, "timestamp", res.Timestamp)
+
+ return res, nil
+}
+
+func (o *OptimisticServiceV1Alpha1) Eth() *eth.Ethereum {
+ return o.sharedServiceContainer.Eth()
+}
+
+func (o *OptimisticServiceV1Alpha1) Bc() *core.BlockChain {
+ return o.sharedServiceContainer.Bc()
+}
+
+func (o *OptimisticServiceV1Alpha1) SetGenesisInfoCalled(value bool) {
+ o.sharedServiceContainer.SetGenesisInfoCalled(value)
+}
+
+func (o *OptimisticServiceV1Alpha1) GenesisInfoCalled() bool {
+ return o.sharedServiceContainer.GenesisInfoCalled()
+}
+
+func (o *OptimisticServiceV1Alpha1) SetGetCommitmentStateCalled(value bool) {
+ o.sharedServiceContainer.SetGetCommitmentStateCalled(value)
+}
+
+func (o *OptimisticServiceV1Alpha1) CommitmentStateCalled() bool {
+ return o.sharedServiceContainer.CommitmentStateCalled()
+}
+
+func (o *OptimisticServiceV1Alpha1) CommitmentUpdateLock() *sync.Mutex {
+ return o.sharedServiceContainer.CommitmentUpdateLock()
+}
+
+func (o *OptimisticServiceV1Alpha1) BlockExecutionLock() *sync.Mutex {
+ return o.sharedServiceContainer.BlockExecutionLock()
+}
+
+func (o *OptimisticServiceV1Alpha1) NextFeeRecipient() common.Address {
+ return o.sharedServiceContainer.NextFeeRecipient()
+}
+
+func (o *OptimisticServiceV1Alpha1) SetNextFeeRecipient(feeRecipient common.Address) {
+ o.sharedServiceContainer.SetNextFeeRecipient(feeRecipient)
+}
+
+func (o *OptimisticServiceV1Alpha1) BridgeAddresses() map[string]*params.AstriaBridgeAddressConfig {
+	return o.sharedServiceContainer.BridgeAddresses()
+}
+
+func (o *OptimisticServiceV1Alpha1) BridgeAllowedAssets() map[string]struct{} {
+	return o.sharedServiceContainer.BridgeAllowedAssets()
+}
+
+func (o *OptimisticServiceV1Alpha1) SyncMethodsCalled() bool {
+	return o.sharedServiceContainer.SyncMethodsCalled()
+}
+
+func (o *OptimisticServiceV1Alpha1) AuctioneerAddress() string {
+	return o.sharedServiceContainer.AuctioneerAddress()
+}
diff --git a/grpc/optimistic/server_test.go b/grpc/optimistic/server_test.go
new file mode 100644
index 000000000..538b0433b
--- /dev/null
+++ b/grpc/optimistic/server_test.go
@@ -0,0 +1,456 @@
+package optimistic
+
+import (
+ optimsticPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/auction/v1alpha1"
+ astriaPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/execution/v1"
+ primitivev1 "buf.build/gen/go/astria/primitives/protocolbuffers/go/astria/primitive/v1"
+ sequencerblockv1 "buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go/astria/sequencerblock/v1"
+ "bytes"
+ "context"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/grpc/execution"
+ "github.com/ethereum/go-ethereum/grpc/shared"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/stretchr/testify/require"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+ "google.golang.org/protobuf/types/known/timestamppb"
+ "math/big"
+ "testing"
+ "time"
+)
+
+func TestOptimisticServiceServerV1Alpha1_ExecuteOptimisticBlock(t *testing.T) {
+ ethService, _, _, _ := shared.SetupSharedService(t, 10)
+
+ tests := []struct {
+ description string
+ callGenesisInfoAndGetCommitmentState bool
+ numberOfTxs int
+ prevBlockHash []byte
+ timestamp uint64
+ depositTxAmount *big.Int // if this is non zero then we send a deposit tx
+ expectedReturnCode codes.Code
+ }{
+ {
+ description: "ExecuteOptimisticBlock without calling GetGenesisInfo and GetCommitmentState",
+ callGenesisInfoAndGetCommitmentState: false,
+ numberOfTxs: 5,
+ prevBlockHash: ethService.BlockChain().GetBlockByNumber(2).Hash().Bytes(),
+ timestamp: ethService.BlockChain().GetBlockByNumber(2).Time() + 2,
+ depositTxAmount: big.NewInt(0),
+ expectedReturnCode: codes.PermissionDenied,
+ },
+ {
+ description: "ExecuteOptimisticBlock with 5 txs and no deposit tx",
+ callGenesisInfoAndGetCommitmentState: true,
+ numberOfTxs: 5,
+ prevBlockHash: ethService.BlockChain().CurrentSafeBlock().Hash().Bytes(),
+ timestamp: ethService.BlockChain().CurrentSafeBlock().Time + 2,
+ depositTxAmount: big.NewInt(0),
+ expectedReturnCode: 0,
+ },
+ {
+ description: "ExecuteOptimisticBlock with 5 txs and a deposit tx",
+ callGenesisInfoAndGetCommitmentState: true,
+ numberOfTxs: 5,
+ prevBlockHash: ethService.BlockChain().CurrentSafeBlock().Hash().Bytes(),
+ timestamp: ethService.BlockChain().CurrentSafeBlock().Time + 2,
+ depositTxAmount: big.NewInt(1000000000000000000),
+ expectedReturnCode: 0,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.description, func(t *testing.T) {
+ ethservice, sharedService, _, _ := shared.SetupSharedService(t, 10)
+
+ // reset the blockchain with each test
+ optimisticServiceV1Alpha1 := SetupOptimisticService(t, sharedService)
+ executionServiceV1 := execution.SetupExecutionService(t, sharedService)
+
+			var err error // declared here to prevent shadowing of genesisInfo in the if branch below
+ var genesisInfo *astriaPb.GenesisInfo
+ var commitmentStateBeforeExecuteBlock *astriaPb.CommitmentState
+ if tt.callGenesisInfoAndGetCommitmentState {
+ // call getGenesisInfo and getCommitmentState before calling executeBlock
+ genesisInfo, err = executionServiceV1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{})
+ require.Nil(t, err, "GetGenesisInfo failed")
+ require.NotNil(t, genesisInfo, "GenesisInfo is nil")
+
+ commitmentStateBeforeExecuteBlock, err = executionServiceV1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{})
+ require.Nil(t, err, "GetCommitmentState failed")
+ require.NotNil(t, commitmentStateBeforeExecuteBlock, "CommitmentState is nil")
+ }
+
+ // create the txs to send
+ // create 5 txs
+ txs := []*types.Transaction{}
+ marshalledTxs := []*sequencerblockv1.RollupData{}
+ for i := 0; i < 5; i++ {
+ unsignedTx := types.NewTransaction(uint64(i), shared.TestToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil)
+ tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), shared.TestKey)
+ require.Nil(t, err, "Failed to sign tx")
+ txs = append(txs, tx)
+
+ marshalledTx, err := tx.MarshalBinary()
+ require.Nil(t, err, "Failed to marshal tx")
+ marshalledTxs = append(marshalledTxs, &sequencerblockv1.RollupData{
+ Value: &sequencerblockv1.RollupData_SequencedData{SequencedData: marshalledTx},
+ })
+ }
+
+ // create deposit tx if depositTxAmount is non zero
+ if tt.depositTxAmount.Cmp(big.NewInt(0)) != 0 {
+ depositAmount := shared.BigIntToProtoU128(tt.depositTxAmount)
+ bridgeAddress := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].BridgeAddress
+ bridgeAssetDenom := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].AssetDenom
+
+ // create new chain destination address for better testing
+ chainDestinationAddressPrivKey, err := crypto.GenerateKey()
+ require.Nil(t, err, "Failed to generate chain destination address")
+
+ chainDestinationAddress := crypto.PubkeyToAddress(chainDestinationAddressPrivKey.PublicKey)
+
+ depositTx := &sequencerblockv1.RollupData{Value: &sequencerblockv1.RollupData_Deposit{Deposit: &sequencerblockv1.Deposit{
+ BridgeAddress: &primitivev1.Address{
+ Bech32M: bridgeAddress,
+ },
+ Asset: bridgeAssetDenom,
+ Amount: depositAmount,
+ RollupId: genesisInfo.RollupId,
+ DestinationChainAddress: chainDestinationAddress.String(),
+ SourceTransactionId: &primitivev1.TransactionId{
+ Inner: "test_tx_hash",
+ },
+ SourceActionIndex: 0,
+ }}}
+
+ marshalledTxs = append(marshalledTxs, depositTx)
+ }
+
+ optimisticHeadCh := make(chan core.ChainOptimisticHeadEvent, 1)
+			optimisticHeadSub := ethservice.BlockChain().SubscribeChainOptimisticHeadEvent(optimisticHeadCh)
+			defer optimisticHeadSub.Unsubscribe()
+
+ baseBlockReq := &optimsticPb.BaseBlock{
+				Timestamp: &timestamppb.Timestamp{
+ Seconds: int64(tt.timestamp),
+ },
+ Transactions: marshalledTxs,
+ SequencerBlockHash: []byte("test_hash"),
+ }
+
+ res, err := optimisticServiceV1Alpha1.ExecuteOptimisticBlock(context.Background(), baseBlockReq)
+ if tt.expectedReturnCode > 0 {
+ require.NotNil(t, err, "ExecuteOptimisticBlock should return an error")
+ require.Equal(t, tt.expectedReturnCode, status.Code(err), "ExecuteOptimisticBlock failed")
+ } else {
+ require.Nil(t, err, "ExecuteOptimisticBlock failed")
+ }
+ if err == nil {
+ require.NotNil(t, res, "ExecuteOptimisticBlock response is nil")
+
+ astriaOrdered := ethservice.TxPool().AstriaOrdered()
+ require.Equal(t, 0, astriaOrdered.Len(), "AstriaOrdered should be empty")
+
+ // check if commitment state is not updated
+ commitmentStateAfterExecuteBlock, err := executionServiceV1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{})
+ require.Nil(t, err, "GetCommitmentState failed")
+
+ require.Exactly(t, commitmentStateBeforeExecuteBlock, commitmentStateAfterExecuteBlock, "Commitment state should not be updated")
+
+ // check if the optimistic block is set
+ optimisticBlock := ethservice.BlockChain().CurrentOptimisticBlock()
+ require.NotNil(t, optimisticBlock, "Optimistic block is not set")
+
+ // check if the optimistic block is correct
+ require.Equal(t, common.BytesToHash(res.Hash), optimisticBlock.Hash(), "Optimistic block hashes do not match")
+ require.Equal(t, common.BytesToHash(res.ParentBlockHash), optimisticBlock.ParentHash, "Optimistic block parent hashes do not match")
+ require.Equal(t, uint64(res.Number), optimisticBlock.Number.Uint64(), "Optimistic block numbers do not match")
+
+ // check if optimistic block is inserted into chain
+ block := ethservice.BlockChain().GetBlockByHash(optimisticBlock.Hash())
+ require.NotNil(t, block, "Optimistic block not found in blockchain")
+ require.Equal(t, uint64(res.Number), block.NumberU64(), "Block number is not correct")
+
+ // timeout for optimistic head event
+ select {
+ case blockEvent := <-optimisticHeadCh:
+ require.NotNil(t, blockEvent, "Optimistic head event not received")
+ require.Equal(t, block.Hash(), blockEvent.Block.Hash(), "Optimistic head event block hash is not correct")
+ require.Equal(t, block.NumberU64(), blockEvent.Block.NumberU64(), "Optimistic head event block number is not correct")
+ case <-time.After(2 * time.Second):
+ require.FailNow(t, "Optimistic head event not received")
+				case err := <-optimisticHeadSub.Err():
+ require.Nil(t, err, "Optimistic head event subscription failed")
+ }
+ }
+ })
+ }
+}
+
+func TestNewOptimisticServiceServerV1Alpha_StreamBids(t *testing.T) {
+ ethservice, sharedService, _, _ := shared.SetupSharedService(t, 10)
+
+ optimisticServiceV1Alpha1 := SetupOptimisticService(t, sharedService)
+ executionServiceV1 := execution.SetupExecutionService(t, sharedService)
+
+ // call genesis info
+ genesisInfo, err := executionServiceV1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{})
+ require.Nil(t, err, "GetGenesisInfo failed")
+ require.NotNil(t, genesisInfo, "GenesisInfo is nil")
+
+ // call get commitment state
+ commitmentState, err := executionServiceV1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{})
+ require.Nil(t, err, "GetCommitmentState failed")
+ require.NotNil(t, commitmentState, "CommitmentState is nil")
+
+ // get previous block hash
+ previousBlock := ethservice.BlockChain().CurrentSafeBlock()
+ require.NotNil(t, previousBlock, "Previous block not found")
+
+ // create the optimistic block via the StreamExecuteOptimisticBlock rpc
+ requestStreams := []*optimsticPb.ExecuteOptimisticBlockStreamRequest{}
+ sequencerBlockHash := []byte("sequencer_block_hash")
+
+ // create 1 stream item with 5 txs
+ txs := []*types.Transaction{}
+ marshalledTxs := []*sequencerblockv1.RollupData{}
+ for i := 0; i < 5; i++ {
+ unsignedTx := types.NewTransaction(uint64(i), shared.TestToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil)
+ tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), shared.TestKey)
+ require.Nil(t, err, "Failed to sign tx")
+ txs = append(txs, tx)
+
+ marshalledTx, err := tx.MarshalBinary()
+ require.Nil(t, err, "Failed to marshal tx")
+ marshalledTxs = append(marshalledTxs, &sequencerblockv1.RollupData{
+ Value: &sequencerblockv1.RollupData_SequencedData{SequencedData: marshalledTx},
+ })
+ }
+
+ txErrors := ethservice.TxPool().Add(txs, true, false)
+ for _, txErr := range txErrors {
+ require.Nil(t, txErr, "Failed to add tx to mempool")
+ }
+
+ pending, queued := ethservice.TxPool().Stats()
+ require.Equal(t, pending, 5, "Mempool should have 5 pending txs")
+ require.Equal(t, queued, 0, "Mempool should have 0 queued txs")
+
+ req := optimsticPb.ExecuteOptimisticBlockStreamRequest{BaseBlock: &optimsticPb.BaseBlock{
+ SequencerBlockHash: sequencerBlockHash,
+ Transactions: marshalledTxs,
+		Timestamp: &timestamppb.Timestamp{
+ Seconds: int64(previousBlock.Time + 2),
+ },
+ }}
+
+ requestStreams = append(requestStreams, &req)
+
+ mockBidirectionalStream := &MockBidirectionalStreaming[optimsticPb.ExecuteOptimisticBlockStreamRequest, optimsticPb.ExecuteOptimisticBlockStreamResponse]{
+ requestStream: requestStreams,
+ accumulatedResponses: []*optimsticPb.ExecuteOptimisticBlockStreamResponse{},
+ requestCounter: 0,
+ }
+
+ errorCh := make(chan error)
+ go func(errorCh chan error) {
+ errorCh <- optimisticServiceV1Alpha1.ExecuteOptimisticBlockStream(mockBidirectionalStream)
+ }(errorCh)
+
+ select {
+	// stream either errors out or gets closed
+ case err := <-errorCh:
+ require.Nil(t, err, "StreamExecuteOptimisticBlock failed")
+ }
+
+ require.Len(t, mockBidirectionalStream.accumulatedResponses, 1, "Number of responses should match the number of requests")
+ accumulatedResponse := mockBidirectionalStream.accumulatedResponses[0]
+
+ currentOptimisticBlock := ethservice.BlockChain().CurrentOptimisticBlock()
+ require.NotNil(t, currentOptimisticBlock, "Optimistic block is not set")
+ require.True(t, bytes.Equal(accumulatedResponse.GetBlock().Hash, currentOptimisticBlock.Hash().Bytes()), "Optimistic block hashes do not match")
+ require.True(t, bytes.Equal(accumulatedResponse.GetBlock().ParentBlockHash, currentOptimisticBlock.ParentHash.Bytes()), "Optimistic block parent hashes do not match")
+ require.Equal(t, uint64(accumulatedResponse.GetBlock().Number), currentOptimisticBlock.Number.Uint64(), "Optimistic block numbers do not match")
+
+ // assert mempool is cleared
+ astriaOrdered := ethservice.TxPool().AstriaOrdered()
+ require.Equal(t, 0, astriaOrdered.Len(), "AstriaOrdered should be empty")
+
+ pending, queued = ethservice.TxPool().Stats()
+ require.Equal(t, pending, 0, "Mempool should have 0 pending txs")
+ require.Equal(t, queued, 0, "Mempool should have 0 queued txs")
+
+ mockServerSideStreaming := MockServerSideStreaming[optimsticPb.GetBidStreamResponse]{
+ sentResponses: []*optimsticPb.GetBidStreamResponse{},
+ }
+
+ errorCh = make(chan error)
+ go func() {
+ errorCh <- optimisticServiceV1Alpha1.GetBidStream(&optimsticPb.GetBidStreamRequest{}, &mockServerSideStreaming)
+ }()
+
+ stateDb, err := ethservice.BlockChain().StateAt(currentOptimisticBlock.Root)
+ require.Nil(t, err, "Failed to get state db")
+
+ latestNonce := stateDb.GetNonce(shared.TestAddr)
+
+ // optimistic block is created, we can now add txs and check if they get streamed
+ // create 5 txs
+ txs = []*types.Transaction{}
+ for i := 0; i < 5; i++ {
+ unsignedTx := types.NewTransaction(latestNonce+uint64(i), shared.TestToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil)
+ tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), shared.TestKey)
+ require.Nil(t, err, "Failed to sign tx")
+ txs = append(txs, tx)
+
+ marshalledTx, err := tx.MarshalBinary()
+ require.Nil(t, err, "Failed to marshal tx")
+ marshalledTxs = append(marshalledTxs, &sequencerblockv1.RollupData{
+ Value: &sequencerblockv1.RollupData_SequencedData{SequencedData: marshalledTx},
+ })
+ }
+
+ txErrors = ethservice.TxPool().Add(txs, true, false)
+ for _, txErr := range txErrors {
+ require.Nil(t, txErr, "Failed to add tx to mempool")
+ }
+
+ pending, queued = ethservice.TxPool().Stats()
+ require.Equal(t, pending, 5, "Mempool should have 5 pending txs")
+ require.Equal(t, queued, 0, "Mempool should have 0 queued txs")
+
+ // give some time for the txs to stream
+ time.Sleep(5 * time.Second)
+
+ // close the mempool to error the method out
+ err = ethservice.TxPool().Close()
+ require.Nil(t, err, "Failed to close mempool")
+
+ select {
+ case err := <-errorCh:
+ require.ErrorContains(t, err, "tx pool subscription closed")
+ }
+
+ require.Len(t, mockServerSideStreaming.sentResponses, 5, "Number of responses should match the number of requests")
+
+ txIndx := 0
+ for _, resp := range mockServerSideStreaming.sentResponses {
+ bid := resp.GetBid()
+
+ require.Len(t, bid.Transactions, 1, "Bid should have 1 tx")
+
+ receivedTx := bid.Transactions[0]
+ sentTx := txs[txIndx]
+ marshalledSentTx, err := sentTx.MarshalBinary()
+ require.Nil(t, err, "Failed to marshal tx")
+ require.True(t, bytes.Equal(receivedTx, marshalledSentTx), "Received tx does not match sent tx")
+ txIndx += 1
+
+ require.True(t, bytes.Equal(bid.RollupParentBlockHash, currentOptimisticBlock.Hash().Bytes()), "PrevRollupBlockHash should match the current optimistic block hash")
+ require.True(t, bytes.Equal(bid.SequencerParentBlockHash, *optimisticServiceV1Alpha1.currentOptimisticSequencerBlock.Load()), "BaseSequencerBlockHash should match the current optimistic sequencer block hash")
+ }
+}
+
+func TestOptimisticServiceServerV1_StreamExecuteOptimisticBlock(t *testing.T) {
+ ethservice, sharedService, _, _ := shared.SetupSharedService(t, 10)
+
+ optimisticServiceV1Alpha1 := SetupOptimisticService(t, sharedService)
+ executionServiceV1 := execution.SetupExecutionService(t, sharedService)
+
+ // call genesis info
+ genesisInfo, err := executionServiceV1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{})
+ require.Nil(t, err, "GetGenesisInfo failed")
+ require.NotNil(t, genesisInfo, "GenesisInfo is nil")
+
+ // call get commitment state
+ commitmentState, err := executionServiceV1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{})
+ require.Nil(t, err, "GetCommitmentState failed")
+ require.NotNil(t, commitmentState, "CommitmentState is nil")
+
+ // get previous block hash
+ previousBlock := ethservice.BlockChain().CurrentSafeBlock()
+ require.NotNil(t, previousBlock, "Previous block not found")
+
+ requestStreams := []*optimsticPb.ExecuteOptimisticBlockStreamRequest{}
+ sequencerBlockHash := []byte("sequencer_block_hash")
+
+ // create 1 stream item with 5 txs
+ txs := []*types.Transaction{}
+ marshalledTxs := []*sequencerblockv1.RollupData{}
+ for i := 0; i < 5; i++ {
+ unsignedTx := types.NewTransaction(uint64(i), shared.TestToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil)
+ tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), shared.TestKey)
+ require.Nil(t, err, "Failed to sign tx")
+ txs = append(txs, tx)
+
+ marshalledTx, err := tx.MarshalBinary()
+ require.Nil(t, err, "Failed to marshal tx")
+ marshalledTxs = append(marshalledTxs, &sequencerblockv1.RollupData{
+ Value: &sequencerblockv1.RollupData_SequencedData{SequencedData: marshalledTx},
+ })
+ }
+
+ errs := ethservice.TxPool().Add(txs, true, false)
+ for _, err := range errs {
+ require.Nil(t, err, "Failed to add tx to mempool")
+ }
+
+ pending, queued := ethservice.TxPool().Stats()
+ require.Equal(t, pending, 5, "Mempool should have 5 pending txs")
+ require.Equal(t, queued, 0, "Mempool should have 0 queued txs")
+
+ req := optimsticPb.ExecuteOptimisticBlockStreamRequest{BaseBlock: &optimsticPb.BaseBlock{
+ SequencerBlockHash: sequencerBlockHash,
+ Transactions: marshalledTxs,
+		Timestamp: &timestamppb.Timestamp{
+ Seconds: int64(previousBlock.Time + 2),
+ },
+ }}
+
+ requestStreams = append(requestStreams, &req)
+
+ mockStream := &MockBidirectionalStreaming[optimsticPb.ExecuteOptimisticBlockStreamRequest, optimsticPb.ExecuteOptimisticBlockStreamResponse]{
+ requestStream: requestStreams,
+ accumulatedResponses: []*optimsticPb.ExecuteOptimisticBlockStreamResponse{},
+ requestCounter: 0,
+ }
+
+ errorCh := make(chan error)
+ go func(errorCh chan error) {
+ errorCh <- optimisticServiceV1Alpha1.ExecuteOptimisticBlockStream(mockStream)
+ }(errorCh)
+
+ select {
+	// the stream either errors out or gets closed
+ case err := <-errorCh:
+ require.Nil(t, err, "StreamExecuteOptimisticBlock failed")
+ }
+
+ accumulatedResponses := mockStream.accumulatedResponses
+
+ require.Equal(t, len(accumulatedResponses), len(mockStream.requestStream), "Number of responses should match the number of requests")
+
+ blockCounter := 1
+ for _, response := range accumulatedResponses {
+ require.True(t, bytes.Equal(response.GetBaseSequencerBlockHash(), sequencerBlockHash), "Sequencer block hash does not match")
+ block := response.GetBlock()
+ require.True(t, bytes.Equal(block.ParentBlockHash, previousBlock.Hash().Bytes()), "Parent block hash does not match")
+ requiredBlockNumber := big.NewInt(0).Add(previousBlock.Number, big.NewInt(int64(blockCounter)))
+ require.Equal(t, requiredBlockNumber.Uint64(), uint64(block.Number), "Block number is not correct")
+ blockCounter += 1
+ }
+
+ // ensure mempool is cleared
+ astriaOrdered := ethservice.TxPool().AstriaOrdered()
+ require.Equal(t, 0, astriaOrdered.Len(), "AstriaOrdered should be empty")
+
+ pending, queued = ethservice.TxPool().Stats()
+ require.Equal(t, pending, 0, "Mempool should have 0 pending txs")
+ require.Equal(t, queued, 0, "Mempool should have 0 queued txs")
+}
diff --git a/grpc/optimistic/test_setup.go b/grpc/optimistic/test_setup.go
new file mode 100644
index 000000000..444132739
--- /dev/null
+++ b/grpc/optimistic/test_setup.go
@@ -0,0 +1,12 @@
+package optimistic
+
+import (
+ "github.com/ethereum/go-ethereum/grpc/shared"
+ "testing"
+)
+
+func SetupOptimisticService(t *testing.T, sharedService *shared.SharedServiceContainer) *OptimisticServiceV1Alpha1 {
+ t.Helper()
+
+ return NewOptimisticServiceV1Alpha(sharedService)
+}
diff --git a/grpc/optimistic/validation.go b/grpc/optimistic/validation.go
new file mode 100644
index 000000000..cbd6c62e6
--- /dev/null
+++ b/grpc/optimistic/validation.go
@@ -0,0 +1,17 @@
+package optimistic
+
+import (
+ optimsticPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/auction/v1alpha1"
+ "fmt"
+)
+
+func validateStaticExecuteOptimisticBlockRequest(req *optimsticPb.BaseBlock) error {
+ if req.Timestamp == nil {
+ return fmt.Errorf("Timestamp cannot be nil")
+ }
+ if len(req.SequencerBlockHash) == 0 {
+ return fmt.Errorf("SequencerBlockHash cannot be empty")
+ }
+
+ return nil
+}
diff --git a/grpc/shared/bech32m.go b/grpc/shared/bech32m.go
new file mode 100644
index 000000000..f4c5237ac
--- /dev/null
+++ b/grpc/shared/bech32m.go
@@ -0,0 +1,92 @@
+package shared
+
+// Copied from astria-cli-go bech32m module (https://github.com/astriaorg/astria-cli-go/blob/d5ef82f718325b2907634c108d42b503211c20e6/modules/bech32m/bech32m.go#L1)
+// TODO: organize the bech32m usage throughout the codebase
+
+import (
+ "crypto/ed25519"
+ "crypto/sha256"
+ "fmt"
+
+ "github.com/btcsuite/btcd/btcutil/bech32"
+)
+
+type Address struct {
+ address string
+ prefix string
+ bytes [20]byte
+}
+
+// String returns the bech32m address as a string
+func (a *Address) String() string {
+ return a.address
+}
+
+// Prefix returns the prefix of the bech32m address
+func (a *Address) Prefix() string {
+ return a.prefix
+}
+
+// Bytes returns the underlying bytes for the bech32m address as a [20]byte array
+func (a *Address) Bytes() [20]byte {
+ return a.bytes
+}
+
+// ValidateBech32mAddress verifies that a string is a valid bech32m address. It
+// will return nil if the address is valid, otherwise it will return an error.
+func ValidateBech32mAddress(address string, intendedPrefix string) error {
+ prefix, byteAddress, version, err := bech32.DecodeGeneric(address)
+ if err != nil {
+ return fmt.Errorf("address must be a bech32 encoded string")
+ }
+ if version != bech32.VersionM {
+ return fmt.Errorf("address must be a bech32m address")
+ }
+ byteAddress, err = bech32.ConvertBits(byteAddress, 5, 8, false)
+ if err != nil {
+ return fmt.Errorf("failed to convert address to 8 bit")
+ }
+ if prefix == "" {
+ return fmt.Errorf("address must have prefix")
+ }
+ if prefix != intendedPrefix {
+ return fmt.Errorf("address must have prefix %s", intendedPrefix)
+ }
+
+ if len(byteAddress) != 20 {
+ return fmt.Errorf("address must decode to a 20 length byte array: got len %d", len(byteAddress))
+ }
+
+ return nil
+}
+
+// EncodeFromBytes encodes a [20]byte array and a string prefix into a
+// bech32m address string.
+func EncodeFromBytes(prefix string, data [20]byte) (string, error) {
+ // Convert the data from 8-bit groups to 5-bit
+ convertedBytes, err := bech32.ConvertBits(data[:], 8, 5, true)
+ if err != nil {
+ return "", fmt.Errorf("failed to convert bits from 8-bit groups to 5-bit groups: %v", err)
+ }
+
+ // Encode the data as bech32m
+ address, err := bech32.EncodeM(prefix, convertedBytes)
+ if err != nil {
+ return "", fmt.Errorf("failed to encode address as bech32m: %v", err)
+ }
+
+ return address, nil
+}
+
+// EncodeFromPublicKey hashes an ed25519 public key and encodes the first 20
+// bytes of the hash, together with the given prefix, into a bech32m address string.
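+//
+// A minimal usage sketch (hypothetical key), assuming the "astria" prefix:
+//
+//	pub, _, _ := ed25519.GenerateKey(nil)
+//	addr, _ := EncodeFromPublicKey("astria", pub)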
+func EncodeFromPublicKey(prefix string, pubkey ed25519.PublicKey) (string, error) {
+ hash := sha256.Sum256(pubkey)
+ var addr [20]byte
+ copy(addr[:], hash[:20])
+ address, err := EncodeFromBytes(prefix, addr)
+ if err != nil {
+ return "", err
+ }
+ return address, nil
+}
diff --git a/grpc/shared/container.go b/grpc/shared/container.go
new file mode 100644
index 000000000..263971c5e
--- /dev/null
+++ b/grpc/shared/container.go
@@ -0,0 +1,192 @@
+package shared
+
+import (
+ "fmt"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core"
+ "github.com/ethereum/go-ethereum/eth"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/pkg/errors"
+ "sync"
+ "sync/atomic"
+)
+
+type SharedServiceContainer struct {
+ eth *eth.Ethereum
+ bc *core.BlockChain
+
+ commitmentUpdateLock sync.Mutex // Lock for the forkChoiceUpdated method
+ blockExecutionLock sync.Mutex // Lock for the NewPayload method
+
+ genesisInfoCalled bool
+ getCommitmentStateCalled bool
+
+	bridgeAddresses     map[string]*params.AstriaBridgeAddressConfig // astria bridge address to config for that bridge account
+	bridgeAllowedAssets map[string]struct{}                          // a set of allowed asset IDs; the struct values are left empty
+
+ // auctioneer address is a bech32m address
+ auctioneerAddress atomic.Pointer[string]
+
+ nextFeeRecipient atomic.Pointer[common.Address] // Fee recipient for the next block
+}
+
+func NewSharedServiceContainer(eth *eth.Ethereum) (*SharedServiceContainer, error) {
+ bc := eth.BlockChain()
+
+ if bc.Config().AstriaRollupName == "" {
+ return nil, errors.New("rollup name not set")
+ }
+
+ if bc.Config().AstriaSequencerInitialHeight == 0 {
+ return nil, errors.New("sequencer initial height not set")
+ }
+
+ if bc.Config().AstriaCelestiaInitialHeight == 0 {
+ return nil, errors.New("celestia initial height not set")
+ }
+
+ if bc.Config().AstriaCelestiaHeightVariance == 0 {
+ return nil, errors.New("celestia height variance not set")
+ }
+
+ bridgeAddresses := make(map[string]*params.AstriaBridgeAddressConfig)
+ bridgeAllowedAssets := make(map[string]struct{})
+ if bc.Config().AstriaBridgeAddressConfigs == nil {
+ log.Warn("bridge addresses not set")
+ } else {
+ nativeBridgeSeen := false
+ for _, cfg := range bc.Config().AstriaBridgeAddressConfigs {
+ err := cfg.Validate(bc.Config().AstriaSequencerAddressPrefix)
+ if err != nil {
+ return nil, fmt.Errorf("invalid bridge address config: %w", err)
+ }
+
+ if cfg.Erc20Asset == nil {
+ if nativeBridgeSeen {
+ return nil, errors.New("only one native bridge address is allowed")
+ }
+ nativeBridgeSeen = true
+ }
+
+ if cfg.Erc20Asset != nil && cfg.SenderAddress == (common.Address{}) {
+ return nil, errors.New("astria bridge sender address must be set for bridged ERC20 assets")
+ }
+
+ bridgeCfg := cfg
+ bridgeAddresses[cfg.BridgeAddress] = &bridgeCfg
+ bridgeAllowedAssets[cfg.AssetDenom] = struct{}{}
+ if cfg.Erc20Asset == nil {
+ log.Info("bridge for sequencer native asset initialized", "bridgeAddress", cfg.BridgeAddress, "assetDenom", cfg.AssetDenom)
+ } else {
+ log.Info("bridge for ERC20 asset initialized", "bridgeAddress", cfg.BridgeAddress, "assetDenom", cfg.AssetDenom, "contractAddress", cfg.Erc20Asset.ContractAddress)
+ }
+ }
+ }
+
+ // To decrease compute cost, we identify the next fee recipient at the start
+ // and update it as we execute blocks.
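+	// the collector configured at the greatest height that does not exceed the next block height wins.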
+ nextFeeRecipient := common.Address{}
+ nextBlock := uint32(bc.CurrentBlock().Number.Int64()) + 1
+ if bc.Config().AstriaFeeCollectors == nil {
+ log.Warn("fee asset collectors not set, assets will be burned")
+ } else {
+ maxHeightCollectorMatch := uint32(0)
+ for height, collector := range bc.Config().AstriaFeeCollectors {
+ if height <= nextBlock && height > maxHeightCollectorMatch {
+ maxHeightCollectorMatch = height
+ nextFeeRecipient = collector
+ }
+ }
+ }
+
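+	// likewise, pick the auctioneer address configured at the greatest height at or below the
+	// next block, validating that it is a bech32m address with the sequencer prefix.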
+ auctioneerAddressesBlockMap := bc.Config().AstriaAuctioneerAddresses
+ auctioneerAddress := ""
+ if auctioneerAddressesBlockMap == nil {
+ return nil, errors.New("auctioneer addresses not set")
+ } else {
+ maxHeightCollectorMatch := uint32(0)
+ for height, address := range auctioneerAddressesBlockMap {
+ if height <= nextBlock && height > maxHeightCollectorMatch {
+ maxHeightCollectorMatch = height
+ if err := ValidateBech32mAddress(address, bc.Config().AstriaSequencerAddressPrefix); err != nil {
+ return nil, errors.Wrapf(err, "auctioneer address %s at height %d is invalid", address, height)
+ }
+ auctioneerAddress = address
+ }
+ }
+ }
+
+ sharedServiceContainer := &SharedServiceContainer{
+ eth: eth,
+ bc: bc,
+ bridgeAddresses: bridgeAddresses,
+ bridgeAllowedAssets: bridgeAllowedAssets,
+ }
+
+ sharedServiceContainer.SetAuctioneerAddress(auctioneerAddress)
+ sharedServiceContainer.SetNextFeeRecipient(nextFeeRecipient)
+
+ return sharedServiceContainer, nil
+}
+
+func (s *SharedServiceContainer) SyncMethodsCalled() bool {
+ return s.genesisInfoCalled && s.getCommitmentStateCalled
+}
+
+func (s *SharedServiceContainer) Bc() *core.BlockChain {
+ return s.bc
+}
+
+func (s *SharedServiceContainer) Eth() *eth.Ethereum {
+ return s.eth
+}
+
+func (s *SharedServiceContainer) SetGenesisInfoCalled(value bool) {
+ s.genesisInfoCalled = value
+}
+
+func (s *SharedServiceContainer) GenesisInfoCalled() bool {
+ return s.genesisInfoCalled
+}
+
+func (s *SharedServiceContainer) SetGetCommitmentStateCalled(value bool) {
+ s.getCommitmentStateCalled = value
+}
+
+func (s *SharedServiceContainer) CommitmentStateCalled() bool {
+ return s.getCommitmentStateCalled
+}
+
+func (s *SharedServiceContainer) CommitmentUpdateLock() *sync.Mutex {
+ return &s.commitmentUpdateLock
+}
+
+func (s *SharedServiceContainer) BlockExecutionLock() *sync.Mutex {
+ return &s.blockExecutionLock
+}
+
+func (s *SharedServiceContainer) NextFeeRecipient() common.Address {
+ return *s.nextFeeRecipient.Load()
+}
+
+// assumes that the block execution lock is being held
+func (s *SharedServiceContainer) SetNextFeeRecipient(nextFeeRecipient common.Address) {
+ s.nextFeeRecipient.Store(&nextFeeRecipient)
+}
+
+func (s *SharedServiceContainer) BridgeAddresses() map[string]*params.AstriaBridgeAddressConfig {
+ return s.bridgeAddresses
+}
+
+func (s *SharedServiceContainer) BridgeAllowedAssets() map[string]struct{} {
+ return s.bridgeAllowedAssets
+}
+
+func (s *SharedServiceContainer) AuctioneerAddress() string {
+ return *s.auctioneerAddress.Load()
+}
+
+func (s *SharedServiceContainer) SetAuctioneerAddress(newAddress string) {
+ s.auctioneerAddress.Store(&newAddress)
+}
diff --git a/grpc/shared/test_setup.go b/grpc/shared/test_setup.go
new file mode 100644
index 000000000..45078ea15
--- /dev/null
+++ b/grpc/shared/test_setup.go
@@ -0,0 +1,36 @@
+package shared
+
+import (
+ "crypto/ed25519"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/eth"
+ "github.com/stretchr/testify/require"
+ "testing"
+)
+
+func SetupSharedService(t *testing.T, noOfBlocksToGenerate int) (*eth.Ethereum, *SharedServiceContainer, ed25519.PrivateKey, ed25519.PublicKey) {
+ t.Helper()
+ genesis, blocks, bridgeAddress, feeCollectorKey, auctioneerPrivKey, auctioneerPubKey := GenerateMergeChain(noOfBlocksToGenerate, true)
+ ethservice := StartEthService(t, genesis)
+
+ sharedService, err := NewSharedServiceContainer(ethservice)
+ require.Nil(t, err, "can't create shared service")
+
+ feeCollector := crypto.PubkeyToAddress(feeCollectorKey.PublicKey)
+ require.Equal(t, feeCollector, sharedService.NextFeeRecipient(), "nextFeeRecipient not set correctly")
+
+ bridgeAsset := genesis.Config.AstriaBridgeAddressConfigs[0].AssetDenom
+ _, ok := sharedService.BridgeAllowedAssets()[bridgeAsset]
+ require.True(t, ok, "bridgeAllowedAssetIDs does not contain bridge asset id")
+
+ _, ok = sharedService.BridgeAddresses()[bridgeAddress]
+ require.True(t, ok, "bridgeAddress not set correctly")
+
+ _, err = ethservice.BlockChain().InsertChain(blocks)
+ require.Nil(t, err, "can't insert blocks")
+
+	// FIXME - this interface isn't right for the tests: we shouldn't be exposing the auctioneer priv key like this.
+	// We should instead allow the test to create it and pass it to the shared service container in the constructor,
+	// but that could make the codebase a bit awkward, so we leave it like this for now.
+ return ethservice, sharedService, auctioneerPrivKey, auctioneerPubKey
+}
diff --git a/grpc/execution/test_utils.go b/grpc/shared/test_utils.go
similarity index 66%
rename from grpc/execution/test_utils.go
rename to grpc/shared/test_utils.go
index dedab1aa5..69926120f 100644
--- a/grpc/execution/test_utils.go
+++ b/grpc/shared/test_utils.go
@@ -1,7 +1,9 @@
-package execution
+package shared
import (
+ primitivev1 "buf.build/gen/go/astria/primitives/protocolbuffers/go/astria/primitive/v1"
"crypto/ecdsa"
+ "crypto/ed25519"
"math/big"
"testing"
"time"
@@ -24,18 +26,18 @@ import (
)
var (
- // testKey is a private key to use for funding a tester account.
- testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
+ // TestKey is a private key to use for funding a tester account.
+ TestKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291")
-	// testAddr is the Ethereum address of the tester account.
+	// TestAddr is the Ethereum address of the tester account.
- testAddr = crypto.PubkeyToAddress(testKey.PublicKey)
+ TestAddr = crypto.PubkeyToAddress(TestKey.PublicKey)
- testToAddress = common.HexToAddress("0x9a9070028361F7AAbeB3f2F2Dc07F82C4a98A02a")
+ TestToAddress = common.HexToAddress("0x9a9070028361F7AAbeB3f2F2Dc07F82C4a98A02a")
testBalance = big.NewInt(2e18)
)
-func generateMergeChain(n int, merged bool) (*core.Genesis, []*types.Block, string, *ecdsa.PrivateKey) {
+func GenerateMergeChain(n int, merged bool) (*core.Genesis, []*types.Block, string, *ecdsa.PrivateKey, ed25519.PrivateKey, ed25519.PublicKey) {
config := *params.AllEthashProtocolChanges
engine := consensus.Engine(beaconConsensus.New(ethash.NewFaker()))
if merged {
@@ -60,6 +62,18 @@ func generateMergeChain(n int, merged bool) (*core.Genesis, []*types.Block, stri
config.AstriaCelestiaInitialHeight = 10
config.AstriaCelestiaHeightVariance = 10
+ auctioneerPubKey, auctioneerPrivKey, err := ed25519.GenerateKey(nil)
+ if err != nil {
+ panic(err)
+ }
+ auctioneerAddress, err := EncodeFromPublicKey(config.AstriaSequencerAddressPrefix, auctioneerPubKey)
+ if err != nil {
+ panic(err)
+ }
+
+ config.AstriaAuctioneerAddresses = make(map[uint32]string)
+ config.AstriaAuctioneerAddresses[1] = auctioneerAddress
+
bech32mBridgeAddress, err := bech32.EncodeM(config.AstriaSequencerAddressPrefix, bridgeAddressBytes)
if err != nil {
panic(err)
@@ -88,7 +102,7 @@ func generateMergeChain(n int, merged bool) (*core.Genesis, []*types.Block, stri
genesis := &core.Genesis{
Config: &config,
Alloc: core.GenesisAlloc{
- testAddr: {Balance: testBalance},
+ TestAddr: {Balance: testBalance},
},
ExtraData: []byte("test genesis"),
Timestamp: 9000,
@@ -99,7 +113,7 @@ func generateMergeChain(n int, merged bool) (*core.Genesis, []*types.Block, stri
generate := func(i int, g *core.BlockGen) {
g.OffsetTime(5)
g.SetExtra([]byte("test"))
- tx, _ := types.SignTx(types.NewTransaction(testNonce, testToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil), types.LatestSigner(&config), testKey)
+ tx, _ := types.SignTx(types.NewTransaction(testNonce, TestToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil), types.LatestSigner(&config), TestKey)
g.AddTx(tx)
testNonce++
}
@@ -113,15 +127,17 @@ func generateMergeChain(n int, merged bool) (*core.Genesis, []*types.Block, stri
config.TerminalTotalDifficulty = totalDifficulty
}
- return genesis, blocks, bech32mBridgeAddress, feeCollectorKey
+ return genesis, blocks, bech32mBridgeAddress, feeCollectorKey, auctioneerPrivKey, auctioneerPubKey
}
-// startEthService creates a full node instance for testing.
+// StartEthService creates a full node instance for testing.
-func startEthService(t *testing.T, genesis *core.Genesis) *eth.Ethereum {
- n, err := node.New(&node.Config{})
+func StartEthService(t *testing.T, genesis *core.Genesis) *eth.Ethereum {
+ n, err := node.New(&node.Config{
+ EnableAuctioneer: true,
+ })
require.Nil(t, err, "can't create node")
mcfg := miner.DefaultConfig
- mcfg.PendingFeeRecipient = testAddr
+ mcfg.PendingFeeRecipient = TestAddr
ethcfg := ðconfig.Config{Genesis: genesis, SyncMode: downloader.FullSync, TrieTimeout: time.Minute, TrieDirtyCache: 256, TrieCleanCache: 256, Miner: mcfg}
ethservice, err := eth.New(n, ethcfg)
require.Nil(t, err, "can't create eth service")
@@ -130,26 +146,8 @@ func startEthService(t *testing.T, genesis *core.Genesis) *eth.Ethereum {
return ethservice
}
-func setupExecutionService(t *testing.T, noOfBlocksToGenerate int) (*eth.Ethereum, *ExecutionServiceServerV1) {
- t.Helper()
- genesis, blocks, bridgeAddress, feeCollectorKey := generateMergeChain(noOfBlocksToGenerate, true)
- ethservice := startEthService(t, genesis)
-
- serviceV1Alpha1, err := NewExecutionServiceServerV1(ethservice)
- require.Nil(t, err, "can't create execution service")
-
- feeCollector := crypto.PubkeyToAddress(feeCollectorKey.PublicKey)
- require.Equal(t, feeCollector, serviceV1Alpha1.nextFeeRecipient, "nextFeeRecipient not set correctly")
-
- bridgeAsset := genesis.Config.AstriaBridgeAddressConfigs[0].AssetDenom
- _, ok := serviceV1Alpha1.bridgeAllowedAssets[bridgeAsset]
- require.True(t, ok, "bridgeAllowedAssetIDs does not contain bridge asset id")
-
- _, ok = serviceV1Alpha1.bridgeAddresses[bridgeAddress]
- require.True(t, ok, "bridgeAddress not set correctly")
-
- _, err = ethservice.BlockChain().InsertChain(blocks)
- require.Nil(t, err, "can't insert blocks")
-
- return ethservice, serviceV1Alpha1
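+// BigIntToProtoU128 splits a non-negative big.Int into the low and high 64-bit limbs of a
+// protobuf Uint128 (value = Hi<<64 + Lo); bits above 2^128-1 are silently dropped.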
+func BigIntToProtoU128(i *big.Int) *primitivev1.Uint128 {
+ lo := i.Uint64()
+ hi := new(big.Int).Rsh(i, 64).Uint64()
+ return &primitivev1.Uint128{Lo: lo, Hi: hi}
}
diff --git a/grpc/shared/validation.go b/grpc/shared/validation.go
new file mode 100644
index 000000000..90ae13619
--- /dev/null
+++ b/grpc/shared/validation.go
@@ -0,0 +1,253 @@
+package shared
+
+import (
+ auctionv1alpha1 "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/auction/v1alpha1"
+ primitivev1 "buf.build/gen/go/astria/primitives/protocolbuffers/go/astria/primitive/v1"
+ sequencerblockv1 "buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go/astria/sequencerblock/v1"
+ "bytes"
+ "crypto/ed25519"
+ "crypto/sha256"
+ "errors"
+ "fmt"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/contracts"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/log"
+ "github.com/ethereum/go-ethereum/metrics"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/golang/protobuf/proto"
+ proto2 "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/types/known/anypb"
+ "math/big"
+ "time"
+)
+
+var (
+ successfulUnbundledAllocations = metrics.GetOrRegisterGauge("astria/optimistic/successful_unbundled_allocations", nil)
+ allocationsWithInvalidPrevBlockHash = metrics.GetOrRegisterGauge("astria/optimistic/allocations_with_invalid_prev_block_hash", nil)
+ allocationsWithInvalidPubKey = metrics.GetOrRegisterGauge("astria/optimistic/allocations_with_invalid_pub_key", nil)
+ allocationsWithInvalidSignature = metrics.GetOrRegisterGauge("astria/optimistic/allocations_with_invalid_signature", nil)
+
+ allocationUnbundlingTimer = metrics.GetOrRegisterTimer("astria/optimistic/allocation_unbundling_time", nil)
+)
+
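+// WrapError prefixes err with msg while keeping the original error available to errors.Is and errors.As via %w.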
+func WrapError(err error, msg string) error {
+ return fmt.Errorf("%s: %w", msg, err)
+}
+
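+// protoU128ToBigInt reassembles a protobuf Uint128 into a big.Int by shifting the high 64 bits left by 64 and adding the low 64 bits.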
+func protoU128ToBigInt(u128 *primitivev1.Uint128) *big.Int {
+ lo := big.NewInt(0).SetUint64(u128.Lo)
+ hi := big.NewInt(0).SetUint64(u128.Hi)
+ hi.Lsh(hi, 64)
+ return lo.Add(lo, hi)
+}
+
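+// validateAndUnmarshalDepositTx converts a sequencer Deposit into a deposit transaction.
+// It rejects deposits from unknown bridge addresses, deposits below the bridge's configured
+// start height, and deposits whose asset is not allowed or does not match the bridge's asset denom.
+// Depending on the bridge configuration, the resulting tx either mints a bridged ERC20 token or
+// transfers the native asset to the destination address.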
+func validateAndUnmarshalDepositTx(
+ deposit *sequencerblockv1.Deposit,
+ height uint64,
+ bridgeAddresses map[string]*params.AstriaBridgeAddressConfig,
+ bridgeAllowedAssets map[string]struct{}) (*types.Transaction, error) {
+ bridgeAddress := deposit.BridgeAddress.GetBech32M()
+ bac, ok := bridgeAddresses[bridgeAddress]
+ if !ok {
+ return nil, fmt.Errorf("unknown bridge address: %s", bridgeAddress)
+ }
+
+ if height < uint64(bac.StartHeight) {
+ return nil, fmt.Errorf("bridging asset %s from bridge %s not allowed before height %d", bac.AssetDenom, bridgeAddress, bac.StartHeight)
+ }
+
+ if _, ok := bridgeAllowedAssets[deposit.Asset]; !ok {
+ return nil, fmt.Errorf("disallowed asset %s in deposit tx", deposit.Asset)
+ }
+
+ if deposit.Asset != bac.AssetDenom {
+ return nil, fmt.Errorf("asset %s does not match bridge address %s asset", deposit.Asset, bridgeAddress)
+ }
+
+ recipient := common.HexToAddress(deposit.DestinationChainAddress)
+ amount := bac.ScaledDepositAmount(protoU128ToBigInt(deposit.Amount))
+
+ if bac.Erc20Asset != nil {
+ log.Debug("creating deposit tx to mint ERC20 asset", "token", bac.AssetDenom, "erc20Address", bac.Erc20Asset.ContractAddress)
+ abi, err := contracts.AstriaBridgeableERC20MetaData.GetAbi()
+ if err != nil {
+ // this should never happen, as the abi is hardcoded in the contract bindings
+ return nil, fmt.Errorf("failed to get abi for erc20 contract for asset %s: %w", bac.AssetDenom, err)
+ }
+
+ // pack arguments for calling the `mint` function on the ERC20 contract
+ args := []interface{}{recipient, amount}
+ calldata, err := abi.Pack("mint", args...)
+ if err != nil {
+ return nil, err
+ }
+
+ txdata := types.DepositTx{
+ From: bac.SenderAddress,
+ Value: new(big.Int), // don't need to set this, as we aren't minting the native asset
+ // mints cost ~14k gas, but this can vary based on existing storage, so we add a little extra as a buffer.
+ //
+ // the fees are spent from the "bridge account" which is not actually a real account, but is instead some
+ // address defined by consensus, so the gas cost is not actually deducted from any account.
+ Gas: 64000,
+ To: &bac.Erc20Asset.ContractAddress,
+ Data: calldata,
+ SourceTransactionId: *deposit.SourceTransactionId,
+ SourceTransactionIndex: deposit.SourceActionIndex,
+ }
+
+ tx := types.NewTx(&txdata)
+ return tx, nil
+ }
+
+ txdata := types.DepositTx{
+ From: bac.SenderAddress,
+ To: &recipient,
+ Value: amount,
+ Gas: 0,
+ SourceTransactionId: *deposit.SourceTransactionId,
+ SourceTransactionIndex: deposit.SourceActionIndex,
+ }
+ return types.NewTx(&txdata), nil
+}
+
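+// validateAndUnmarshallSequenceAction decodes sequenced data into an Ethereum transaction,
+// rejecting deposit and blob transactions, which are not allowed in sequenced data.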
+func validateAndUnmarshallSequenceAction(tx *sequencerblockv1.RollupData) (*types.Transaction, error) {
+ ethTx := new(types.Transaction)
+ err := ethTx.UnmarshalBinary(tx.GetSequencedData())
+ if err != nil {
+ return nil, fmt.Errorf("failed to unmarshal sequenced data into transaction: %w. tx hash: %s", err, sha256.Sum256(tx.GetSequencedData()))
+ }
+
+ if ethTx.Type() == types.DepositTxType {
+ return nil, fmt.Errorf("deposit tx not allowed in sequenced data. tx hash: %s", sha256.Sum256(tx.GetSequencedData()))
+ }
+
+ if ethTx.Type() == types.BlobTxType {
+ return nil, fmt.Errorf("blob tx not allowed in sequenced data. tx hash: %s", sha256.Sum256(tx.GetSequencedData()))
+ }
+
+ return ethTx, nil
+}
+
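+// unmarshallAllocationTxs verifies an auction Allocation and returns the transactions in its bid.
+// The allocation is accepted only if the bid's rollup parent block hash matches prevBlockHash,
+// the allocation's public key maps to the configured auctioneer bech32m address, and the ed25519
+// signature over the marshalled bid verifies against that public key.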
+func unmarshallAllocationTxs(allocation *auctionv1alpha1.Allocation, prevBlockHash []byte, auctioneerBech32Address string, addressPrefix string) (types.Transactions, error) {
+ unbundlingStart := time.Now()
+ defer allocationUnbundlingTimer.UpdateSince(unbundlingStart)
+
+ processedTxs := types.Transactions{}
+ bid := &auctionv1alpha1.Bid{}
+
+ unprocessedBid := allocation.GetBid()
+
+ err := anypb.UnmarshalTo(unprocessedBid, bid, proto2.UnmarshalOptions{
+ Merge: false,
+ AllowPartial: false,
+ })
+ if err != nil {
+ return nil, WrapError(err, "failed to unmarshal bid")
+ }
+
+ log.Debug("Found a potential allocation in the rollup data. Checking if it is valid.", "prevBlockHash", common.BytesToHash(prevBlockHash).String(), "auctioneerBech32Address", auctioneerBech32Address)
+
+ if !bytes.Equal(bid.GetRollupParentBlockHash(), prevBlockHash) {
+ allocationsWithInvalidPrevBlockHash.Inc(1)
+ return nil, errors.New("prev block hash in allocation does not match the previous block hash")
+ }
+
+ publicKey := ed25519.PublicKey(allocation.GetPublicKey())
+ bech32Address, err := EncodeFromPublicKey(addressPrefix, publicKey)
+ if err != nil {
+ return nil, WrapError(err, fmt.Sprintf("failed to encode public key to bech32m address: %x", publicKey))
+ }
+
+ if auctioneerBech32Address != bech32Address {
+ allocationsWithInvalidPubKey.Inc(1)
+ return nil, fmt.Errorf("address in allocation does not match auctioneer address. expected: %s, got: %s", auctioneerBech32Address, bech32Address)
+ }
+
+ message, err := proto.Marshal(bid)
+ if err != nil {
+ return nil, WrapError(err, "failed to marshal allocation to verify signature")
+ }
+
+ signature := allocation.GetSignature()
+ if !ed25519.Verify(publicKey, message, signature) {
+ allocationsWithInvalidSignature.Inc(1)
+ return nil, fmt.Errorf("signature in allocation does not match the public key")
+ }
+
+ log.Debug("Allocation is valid. Unmarshalling the transactions in the bid.")
+ // unmarshall the transactions in the bid
+ for _, allocationTx := range bid.GetTransactions() {
+ ethtx := new(types.Transaction)
+ err := ethtx.UnmarshalBinary(allocationTx)
+ if err != nil {
+ return nil, WrapError(err, "failed to unmarshall allocation transaction")
+ }
+ processedTxs = append(processedTxs, ethtx)
+ }
+
+ successfulUnbundledAllocations.Inc(1)
+
+ return processedTxs, nil
+
+}
+
+// UnbundleRollupDataTransactions takes a list of sequencer rollup data items and returns the list of Ethereum transactions to execute.
+// TODO - this function has become too big. we should start breaking it down
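+// Deposits are converted to deposit txs; the first sequenced-data item that decodes and
+// validates as an auction Allocation is unbundled and its bid transactions are prepended
+// to the returned list; all other sequenced data is decoded as regular transactions.
+// Items that fail validation are logged and skipped rather than aborting the block.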
+func UnbundleRollupDataTransactions(txs []*sequencerblockv1.RollupData, height uint64, bridgeAddresses map[string]*params.AstriaBridgeAddressConfig,
+ bridgeAllowedAssets map[string]struct{}, prevBlockHash []byte, auctioneerBech32Address string, addressPrefix string) types.Transactions {
+
+ processedTxs := types.Transactions{}
+ allocationTxs := types.Transactions{}
+ // only the first item that unmarshals as a valid Allocation is used; any later allocations in the same block are ignored
+ var allocation *auctionv1alpha1.Allocation
+ for _, tx := range txs {
+ if deposit := tx.GetDeposit(); deposit != nil {
+ depositTx, err := validateAndUnmarshalDepositTx(deposit, height, bridgeAddresses, bridgeAllowedAssets)
+ if err != nil {
+ log.Error("failed to validate and unmarshal deposit tx", "error", err)
+ continue
+ }
+
+ processedTxs = append(processedTxs, depositTx)
+ } else {
+ sequenceData := tx.GetSequencedData()
+ // check if sequence data is of type Allocation
+ if allocation == nil {
+ // TODO - check if we can avoid a temp value
+ tempAllocation := &auctionv1alpha1.Allocation{}
+ err := proto.Unmarshal(sequenceData, tempAllocation)
+ if err == nil {
+ unmarshalledAllocationTxs, err := unmarshallAllocationTxs(tempAllocation, prevBlockHash, auctioneerBech32Address, addressPrefix)
+ if err != nil {
+ log.Error("failed to unmarshall allocation transactions", "error", err)
+ continue
+ }
+
+ allocation = tempAllocation
+ allocationTxs = unmarshalledAllocationTxs
+ } else {
+ ethtx, err := validateAndUnmarshallSequenceAction(tx)
+ if err != nil {
+ log.Error("failed to unmarshall sequence action", "error", err)
+ continue
+ }
+ processedTxs = append(processedTxs, ethtx)
+ }
+ } else {
+ ethtx, err := validateAndUnmarshallSequenceAction(tx)
+ if err != nil {
+ log.Error("failed to unmarshall sequence action", "error", err)
+ continue
+ }
+ processedTxs = append(processedTxs, ethtx)
+ }
+ }
+ }
+
+ // prepend allocation txs to processedTxs
+ processedTxs = append(allocationTxs, processedTxs...)
+
+ return processedTxs
+}
diff --git a/grpc/shared/validation_test.go b/grpc/shared/validation_test.go
new file mode 100644
index 000000000..3292d5334
--- /dev/null
+++ b/grpc/shared/validation_test.go
@@ -0,0 +1,747 @@
+package shared
+
+import (
+ auctionv1alpha1 "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/auction/v1alpha1"
+ "bytes"
+ "crypto/ecdsa"
+ "crypto/ed25519"
+ "github.com/golang/protobuf/proto"
+ "google.golang.org/protobuf/types/known/anypb"
+ "math/big"
+ "testing"
+
+ primitivev1 "buf.build/gen/go/astria/primitives/protocolbuffers/go/astria/primitive/v1"
+ sequencerblockv1 "buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go/astria/sequencerblock/v1"
+ "github.com/btcsuite/btcd/btcutil/bech32"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/ethereum/go-ethereum/core/types"
+ "github.com/ethereum/go-ethereum/crypto"
+ "github.com/ethereum/go-ethereum/params"
+ "github.com/holiman/uint256"
+ "github.com/stretchr/testify/require"
+)
+
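+// allocationInfo bundles the raw parts of an Allocation so tests can assemble both valid and
+// deliberately malformed allocations.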
+type allocationInfo struct {
+ signature []byte
+ publicKey []byte
+ bid *auctionv1alpha1.Bid
+}
+
+func (a *allocationInfo) convertToAllocation() (*auctionv1alpha1.Allocation, error) {
+ convertedBid, err := anypb.New(a.bid)
+ if err != nil {
+ return nil, err
+ }
+
+ return &auctionv1alpha1.Allocation{
+ Signature: a.signature,
+ PublicKey: a.publicKey,
+ Bid: convertedBid,
+ }, nil
+}
+
+func transaction(nonce uint64, gaslimit uint64, key *ecdsa.PrivateKey) *types.Transaction {
+ return pricedTransaction(nonce, gaslimit, big.NewInt(1), key)
+}
+
+func pricedTransaction(nonce uint64, gaslimit uint64, gasprice *big.Int, key *ecdsa.PrivateKey) *types.Transaction {
+ tx, _ := types.SignTx(types.NewTransaction(nonce, common.Address{}, big.NewInt(100), gaslimit, gasprice, nil), types.HomesteadSigner{}, key)
+ return tx
+}
+
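+// bigIntToProtoU128 packs a big.Int into the lo/hi 64-bit limbs of a protobuf Uint128.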
+func bigIntToProtoU128(i *big.Int) *primitivev1.Uint128 {
+ lo := i.Uint64()
+ hi := new(big.Int).Rsh(i, 64).Uint64()
+ return &primitivev1.Uint128{Lo: lo, Hi: hi}
+}
+
+func testBlobTx() *types.Transaction {
+ return types.NewTx(&types.BlobTx{
+ Nonce: 1,
+ To: TestAddr,
+ Value: uint256.NewInt(1000),
+ Gas: 1000,
+ Data: []byte("data"),
+ })
+}
+
+func testDepositTx() *types.Transaction {
+ return types.NewTx(&types.DepositTx{
+ From: TestAddr,
+ Value: big.NewInt(1000),
+ Gas: 1000,
+ })
+}
+
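+// generateBech32MAddress returns a random bech32m address with the "astria" prefix, derived from a freshly generated secp256k1 key.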
+func generateBech32MAddress() string {
+ addressKey, err := crypto.GenerateKey()
+ if err != nil {
+ panic(err)
+ }
+ bridgeAddress := crypto.PubkeyToAddress(addressKey.PublicKey)
+ bridgeAddressBytes, err := bech32.ConvertBits(bridgeAddress.Bytes(), 8, 5, false)
+ if err != nil {
+ panic(err)
+ }
+
+ bech32m, err := bech32.EncodeM("astria", bridgeAddressBytes)
+ if err != nil {
+ panic(err)
+ }
+
+ return bech32m
+}
+
+func TestUnmarshallAllocationTxs(t *testing.T) {
+ ethService, serviceV1Alpha1, auctioneerPrivKey, auctioneerPubKey := SetupSharedService(t, 10)
+ addressPrefix := ethService.BlockChain().Config().AstriaSequencerAddressPrefix
+
+ tx1 := transaction(0, 1000, TestKey)
+ validMarshalledTx1, err := tx1.MarshalBinary()
+ require.NoError(t, err, "failed to marshal valid tx: %v", err)
+
+ tx2 := transaction(1, 1000, TestKey)
+ validMarshalledTx2, err := tx2.MarshalBinary()
+ require.NoError(t, err, "failed to marshal valid tx: %v", err)
+
+ tx3 := transaction(2, 1000, TestKey)
+ validMarshalledTx3, err := tx3.MarshalBinary()
+ require.NoError(t, err, "failed to marshal valid tx: %v", err)
+
+ validBid := &auctionv1alpha1.Bid{
+ Fee: 100,
+ Transactions: [][]byte{validMarshalledTx1, validMarshalledTx2, validMarshalledTx3},
+ SequencerParentBlockHash: []byte("sequencer block hash"),
+ RollupParentBlockHash: []byte("prev rollup block hash"),
+ }
+
+ marshalledAllocation, err := proto.Marshal(validBid)
+ require.NoError(t, err, "failed to marshal payload: %v", err)
+
+ signedAllocation, err := auctioneerPrivKey.Sign(nil, marshalledAllocation, &ed25519.Options{
+ Hash: 0,
+ Context: "",
+ })
+ require.NoError(t, err, "failed to sign allocation: %v", err)
+
+ tests := []struct {
+ description string
+
+ allocationInfo allocationInfo
+
+ prevBlockHash []byte
+ expectedOutput types.Transactions
+ // just check if error contains the string since error contains other details
+ wantErr string
+ }{
+ {
+ description: "previous block hash mismatch",
+ allocationInfo: allocationInfo{
+ signature: make([]byte, 0),
+ publicKey: make([]byte, 0),
+ bid: &auctionv1alpha1.Bid{
+ Fee: 100,
+ Transactions: [][]byte{[]byte("unmarshallable tx")},
+ SequencerParentBlockHash: []byte("sequencer block hash"),
+ RollupParentBlockHash: []byte("prev rollup block hash"),
+ },
+ },
+ prevBlockHash: []byte("not prev rollup block hash"),
+ expectedOutput: types.Transactions{},
+ wantErr: "prev block hash in allocation does not match the previous block hash",
+ },
+ {
+ description: "public key doesn't match",
+ allocationInfo: allocationInfo{
+ signature: []byte("invalid signature"),
+ publicKey: []byte("invalid public key"),
+ bid: &auctionv1alpha1.Bid{
+ Fee: 100,
+ Transactions: [][]byte{[]byte("unmarshallable tx")},
+ SequencerParentBlockHash: []byte("sequencer block hash"),
+ RollupParentBlockHash: []byte("prev rollup block hash"),
+ },
+ },
+ prevBlockHash: []byte("prev rollup block hash"),
+ expectedOutput: types.Transactions{},
+ wantErr: "address in allocation does not match auctioneer address",
+ },
+ {
+ description: "invalid signature",
+ allocationInfo: allocationInfo{
+ signature: []byte("invalid signature"),
+ publicKey: auctioneerPubKey,
+ bid: &auctionv1alpha1.Bid{
+ Fee: 100,
+ Transactions: [][]byte{[]byte("unmarshallable tx")},
+ SequencerParentBlockHash: []byte("sequencer block hash"),
+ RollupParentBlockHash: []byte("prev rollup block hash"),
+ },
+ },
+ prevBlockHash: []byte("prev rollup block hash"),
+ expectedOutput: types.Transactions{},
+ wantErr: "signature in allocation does not match the public key",
+ },
+ {
+ description: "valid allocation",
+ allocationInfo: allocationInfo{
+ signature: signedAllocation,
+ publicKey: auctioneerPubKey,
+ bid: validBid,
+ },
+ prevBlockHash: []byte("prev rollup block hash"),
+ expectedOutput: types.Transactions{tx1, tx2, tx3},
+ wantErr: "",
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.description, func(t *testing.T) {
+ allocation, err := test.allocationInfo.convertToAllocation()
+ require.NoError(t, err, "failed to convert allocation info to allocation: %v", err)
+
+ finalTxs, err := unmarshallAllocationTxs(allocation, test.prevBlockHash, serviceV1Alpha1.AuctioneerAddress(), addressPrefix)
+ if test.wantErr == "" && err == nil {
+ for _, tx := range test.expectedOutput {
+ foundTx := false
+ for _, finalTx := range finalTxs {
+ if bytes.Equal(finalTx.Hash().Bytes(), tx.Hash().Bytes()) {
+ foundTx = true
+ }
+ }
+
+ require.True(t, foundTx, "expected tx not found in final txs")
+ }
+ return
+ }
+ require.False(t, test.wantErr == "" && err != nil, "expected no error, got %v", err)
+ require.Error(t, err, "expected an error containing %q, got nil", test.wantErr)
+ require.Contains(t, err.Error(), test.wantErr)
+ })
+ }
+}
+
+func TestValidateAndUnmarshallDepositTx(t *testing.T) {
+ ethservice, serviceV1Alpha1, _, _ := SetupSharedService(t, 10)
+
+ chainDestinationKey, err := crypto.GenerateKey()
+ require.Nil(t, err, "failed to generate chain destination key: %v", err)
+ chainDestinationAddress := crypto.PubkeyToAddress(chainDestinationKey.PublicKey)
+
+ bridgeAssetDenom := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].AssetDenom
+ invalidBridgeAssetDenom := "invalid-asset-denom"
+
+ invalidHeightBridgeAssetDenom := "invalid-height-asset-denom"
+ invalidHeightBridgeAddressBech32m := generateBech32MAddress()
+ serviceV1Alpha1.BridgeAddresses()[invalidHeightBridgeAddressBech32m] = ¶ms.AstriaBridgeAddressConfig{
+ AssetDenom: invalidHeightBridgeAssetDenom,
+ StartHeight: 100,
+ }
+
+ bridgeAddress := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].BridgeAddress
+
+ tests := []struct {
+ description string
+ sequencerTx *sequencerblockv1.Deposit
+ // just check if error contains the string since error contains other details
+ wantErr string
+ }{
+ {
+ description: "deposit tx with an unknown bridge address",
+ sequencerTx: &sequencerblockv1.Deposit{
+ BridgeAddress: &primitivev1.Address{
+ Bech32M: generateBech32MAddress(),
+ },
+ Asset: bridgeAssetDenom,
+ Amount: bigIntToProtoU128(big.NewInt(1000000000000000000)),
+ RollupId: &primitivev1.RollupId{Inner: make([]byte, 0)},
+ DestinationChainAddress: chainDestinationAddress.String(),
+ SourceTransactionId: &primitivev1.TransactionId{
+ Inner: "test_tx_hash",
+ },
+ SourceActionIndex: 0,
+ },
+ wantErr: "unknown bridge address",
+ },
+ {
+ description: "deposit tx with a disallowed asset id",
+ sequencerTx: &sequencerblockv1.Deposit{
+ BridgeAddress: &primitivev1.Address{
+ Bech32M: bridgeAddress,
+ },
+ Asset: invalidBridgeAssetDenom,
+ Amount: bigIntToProtoU128(big.NewInt(1000000000000000000)),
+ RollupId: &primitivev1.RollupId{Inner: make([]byte, 0)},
+ DestinationChainAddress: chainDestinationAddress.String(),
+ SourceTransactionId: &primitivev1.TransactionId{
+ Inner: "test_tx_hash",
+ },
+ SourceActionIndex: 0,
+ },
+ wantErr: "disallowed asset",
+ },
+ {
+ description: "deposit tx with a height and asset below the bridge start height",
+ sequencerTx: &sequencerblockv1.Deposit{
+ BridgeAddress: &primitivev1.Address{
+ Bech32M: invalidHeightBridgeAddressBech32m,
+ },
+ Asset: invalidHeightBridgeAssetDenom,
+ Amount: bigIntToProtoU128(big.NewInt(1000000000000000000)),
+ RollupId: &primitivev1.RollupId{Inner: make([]byte, 0)},
+ DestinationChainAddress: chainDestinationAddress.String(),
+ SourceTransactionId: &primitivev1.TransactionId{
+ Inner: "test_tx_hash",
+ },
+ SourceActionIndex: 0,
+ },
+ wantErr: "not allowed before height",
+ },
+ {
+ description: "valid deposit tx",
+ sequencerTx: &sequencerblockv1.Deposit{
+ BridgeAddress: &primitivev1.Address{
+ Bech32M: bridgeAddress,
+ },
+ Asset: bridgeAssetDenom,
+ Amount: bigIntToProtoU128(big.NewInt(1000000000000000000)),
+ RollupId: &primitivev1.RollupId{Inner: make([]byte, 0)},
+ DestinationChainAddress: chainDestinationAddress.String(),
+ SourceTransactionId: &primitivev1.TransactionId{
+ Inner: "test_tx_hash",
+ },
+ SourceActionIndex: 0,
+ },
+ wantErr: "",
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.description, func(t *testing.T) {
+ _, err := validateAndUnmarshalDepositTx(test.sequencerTx, 2, serviceV1Alpha1.BridgeAddresses(), serviceV1Alpha1.BridgeAllowedAssets())
+ if test.wantErr == "" && err == nil {
+ return
+ }
+ require.False(t, test.wantErr == "" && err != nil, "expected no error, got %v", err)
+ require.Error(t, err, "expected an error containing %q, got nil", test.wantErr)
+ require.Contains(t, err.Error(), test.wantErr)
+ })
+ }
+}
+
+func TestValidateAndUnmarshallSequenceAction(t *testing.T) {
+ blobTx, err := testBlobTx().MarshalBinary()
+ require.Nil(t, err, "failed to marshal random blob tx: %v", err)
+
+ depositTx, err := testDepositTx().MarshalBinary()
+ require.Nil(t, err, "failed to marshal random deposit tx: %v", err)
+
+ tx1 := transaction(0, 1000, TestKey)
+ validMarshalledTx, err := tx1.MarshalBinary()
+ require.NoError(t, err, "failed to marshal valid tx: %v", err)
+
+ tests := []struct {
+ description string
+ sequencerTx *sequencerblockv1.RollupData
+ // just check if error contains the string since errors can contain other details
+ wantErr string
+ }{
+ {
+ description: "unmarshallable sequencer tx",
+ sequencerTx: &sequencerblockv1.RollupData{
+ Value: &sequencerblockv1.RollupData_SequencedData{
+ SequencedData: []byte("unmarshallable tx"),
+ },
+ },
+ wantErr: "failed to unmarshal sequenced data into transaction",
+ },
+ {
+ description: "blob type sequence tx",
+ sequencerTx: &sequencerblockv1.RollupData{
+ Value: &sequencerblockv1.RollupData_SequencedData{
+ SequencedData: blobTx,
+ },
+ },
+ wantErr: "blob tx not allowed in sequenced data",
+ },
+ {
+ description: "deposit type sequence tx",
+ sequencerTx: &sequencerblockv1.RollupData{
+ Value: &sequencerblockv1.RollupData_SequencedData{
+ SequencedData: depositTx,
+ },
+ },
+ wantErr: "deposit tx not allowed in sequenced data",
+ },
+ {
+ description: "valid sequencer tx",
+ sequencerTx: &sequencerblockv1.RollupData{
+ Value: &sequencerblockv1.RollupData_SequencedData{SequencedData: validMarshalledTx},
+ },
+ wantErr: "",
+ },
+ }
+
+ for _, test := range tests {
+ t.Run(test.description, func(t *testing.T) {
+ _, err := validateAndUnmarshallSequenceAction(test.sequencerTx)
+ if test.wantErr == "" && err == nil {
+ return
+ }
+ require.False(t, test.wantErr == "" && err != nil, "expected no error, got %v", err)
+ require.Error(t, err, "expected an error containing %q, got nil", test.wantErr)
+ require.Contains(t, err.Error(), test.wantErr)
+ })
+ }
+}
+
+func TestUnbundleRollupData(t *testing.T) {
+ ethservice, serviceV1Alpha1, auctioneerPrivKey, auctioneerPubKey := SetupSharedService(t, 10)
+
+ addressPrefix := ethservice.BlockChain().Config().AstriaSequencerAddressPrefix
+
+ baseSequencerBlockHash := []byte("sequencer block hash")
+ prevRollupBlockHash := []byte("prev rollup block hash")
+
+ // txs in
+ tx1 := transaction(0, 1000, TestKey)
+ tx2 := transaction(1, 1000, TestKey)
+ tx3 := transaction(2, 1000, TestKey)
+ tx4 := transaction(3, 1000, TestKey)
+ tx5 := transaction(4, 1000, TestKey)
+
+ validMarshalledTx1, err := tx1.MarshalBinary()
+ require.NoError(t, err, "failed to marshal valid tx: %v", err)
+ validMarshalledTx2, err := tx2.MarshalBinary()
+ require.NoError(t, err, "failed to marshal valid tx: %v", err)
+ validMarshalledTx3, err := tx3.MarshalBinary()
+ require.NoError(t, err, "failed to marshal valid tx: %v", err)
+ validMarshalledTx4, err := tx4.MarshalBinary()
+ require.NoError(t, err, "failed to marshal valid tx: %v", err)
+ validMarshalledTx5, err := tx5.MarshalBinary()
+ require.NoError(t, err, "failed to marshal valid tx: %v", err)
+
+ bid := &auctionv1alpha1.Bid{
+ Fee: 100,
+ Transactions: [][]byte{validMarshalledTx1, validMarshalledTx2, validMarshalledTx3},
+ SequencerParentBlockHash: baseSequencerBlockHash,
+ RollupParentBlockHash: prevRollupBlockHash,
+ }
+
+ marshalledBid, err := proto.Marshal(bid)
+ require.NoError(t, err, "failed to marshal payload: %v", err)
+ signedBid, err := auctioneerPrivKey.Sign(nil, marshalledBid, &ed25519.Options{
+ Hash: 0,
+ Context: "",
+ })
+ require.NoError(t, err, "failed to sign payload: %v", err)
+
+ // TODO - we need better naming here!
+ finalBid, err := anypb.New(bid)
+ require.NoError(t, err, "failed to convert bid to anypb: %v", err)
+
+ allocation := &auctionv1alpha1.Allocation{
+ Signature: signedBid,
+ PublicKey: auctioneerPubKey,
+ Bid: finalBid,
+ }
+
+ marshalledAllocation, err := proto.Marshal(allocation)
+ require.NoError(t, err, "failed to marshal allocation: %v", err)
+ allocationSequenceData := &sequencerblockv1.RollupData{
+ Value: &sequencerblockv1.RollupData_SequencedData{
+ SequencedData: marshalledAllocation,
+ },
+ }
+ seqData1 := &sequencerblockv1.RollupData{
+ Value: &sequencerblockv1.RollupData_SequencedData{
+ SequencedData: validMarshalledTx4,
+ },
+ }
+ seqData2 := &sequencerblockv1.RollupData{
+ Value: &sequencerblockv1.RollupData_SequencedData{
+ SequencedData: validMarshalledTx5,
+ },
+ }
+
+ bridgeAddress := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].BridgeAddress
+ bridgeAssetDenom := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].AssetDenom
+ chainDestinationKey, err := crypto.GenerateKey()
+ require.Nil(t, err, "failed to generate chain destination key: %v", err)
+ chainDestinationAddress := crypto.PubkeyToAddress(chainDestinationKey.PublicKey)
+
+ depositTx := &sequencerblockv1.RollupData{Value: &sequencerblockv1.RollupData_Deposit{Deposit: &sequencerblockv1.Deposit{
+ BridgeAddress: &primitivev1.Address{
+ Bech32M: bridgeAddress,
+ },
+ Asset: bridgeAssetDenom,
+ Amount: bigIntToProtoU128(big.NewInt(1000000000000000000)),
+ RollupId: &primitivev1.RollupId{Inner: make([]byte, 0)},
+ DestinationChainAddress: chainDestinationAddress.String(),
+ SourceTransactionId: &primitivev1.TransactionId{
+ Inner: "test_tx_hash",
+ },
+ SourceActionIndex: 0,
+ }}}
+
+ finalTxs := []*sequencerblockv1.RollupData{seqData1, seqData2, allocationSequenceData, depositTx}
+
+ txsToProcess := UnbundleRollupDataTransactions(finalTxs, 2, serviceV1Alpha1.BridgeAddresses(), serviceV1Alpha1.BridgeAllowedAssets(), prevRollupBlockHash, serviceV1Alpha1.AuctioneerAddress(), addressPrefix)
+
+ require.Equal(t, txsToProcess.Len(), 6, "expected 6 txs to process")
+
+ // allocation txs should be the first 3
+ require.True(t, bytes.Equal(txsToProcess[0].Hash().Bytes(), tx1.Hash().Bytes()), "expected tx1 to be first")
+ require.True(t, bytes.Equal(txsToProcess[1].Hash().Bytes(), tx2.Hash().Bytes()), "expected tx2 to be second")
+ require.True(t, bytes.Equal(txsToProcess[2].Hash().Bytes(), tx3.Hash().Bytes()), "expected tx3 to be third")
+ require.True(t, bytes.Equal(txsToProcess[3].Hash().Bytes(), tx4.Hash().Bytes()), "expected tx4 to be fourth")
+ require.True(t, bytes.Equal(txsToProcess[4].Hash().Bytes(), tx5.Hash().Bytes()), "expected tx5 to be fifth")
+}
+
+func TestUnbundleRollupDataWithDuplicateAllocations(t *testing.T) {
+ ethservice, serviceV1Alpha1, auctioneerPrivKey, auctioneerPubKey := SetupSharedService(t, 10)
+ addressPrefix := ethservice.BlockChain().Config().AstriaSequencerAddressPrefix
+
+ baseSequencerBlockHash := []byte("sequencer block hash")
+ prevRollupBlockHash := []byte("prev rollup block hash")
+
+ // txs in
+ tx1 := transaction(0, 1000, TestKey)
+ tx2 := transaction(1, 1000, TestKey)
+ tx3 := transaction(2, 1000, TestKey)
+ tx4 := transaction(3, 1000, TestKey)
+ tx5 := transaction(4, 1000, TestKey)
+
+ validMarshalledTx1, err := tx1.MarshalBinary()
+ require.NoError(t, err, "failed to marshal valid tx: %v", err)
+ validMarshalledTx2, err := tx2.MarshalBinary()
+ require.NoError(t, err, "failed to marshal valid tx: %v", err)
+ validMarshalledTx3, err := tx3.MarshalBinary()
+ require.NoError(t, err, "failed to marshal valid tx: %v", err)
+ validMarshalledTx4, err := tx4.MarshalBinary()
+ require.NoError(t, err, "failed to marshal valid tx: %v", err)
+ validMarshalledTx5, err := tx5.MarshalBinary()
+ require.NoError(t, err, "failed to marshal valid tx: %v", err)
+
+ bid := &auctionv1alpha1.Bid{
+ Fee: 100,
+ Transactions: [][]byte{validMarshalledTx1, validMarshalledTx2, validMarshalledTx3},
+ SequencerParentBlockHash: baseSequencerBlockHash,
+ RollupParentBlockHash: prevRollupBlockHash,
+ }
+
+ marshalledBid, err := proto.Marshal(bid)
+ require.NoError(t, err, "failed to marshal payload: %v", err)
+ signedPayload, err := auctioneerPrivKey.Sign(nil, marshalledBid, &ed25519.Options{
+ Hash: 0,
+ Context: "",
+ })
+ require.NoError(t, err, "failed to sign payload: %v", err)
+
+ finalBid, err := anypb.New(bid)
+ require.NoError(t, err, "failed to convert bid to anypb: %v", err)
+
+ allocation := &auctionv1alpha1.Allocation{
+ Signature: signedPayload,
+ PublicKey: auctioneerPubKey,
+ Bid: finalBid,
+ }
+
+ marshalledAllocation, err := proto.Marshal(allocation)
+ require.NoError(t, err, "failed to marshal allocation: %v", err)
+ allocationSequenceData := &sequencerblockv1.RollupData{
+ Value: &sequencerblockv1.RollupData_SequencedData{
+ SequencedData: marshalledAllocation,
+ },
+ }
+ // this allocation should be ignored
+ allocationSequenceData2 := &sequencerblockv1.RollupData{
+ Value: &sequencerblockv1.RollupData_SequencedData{
+ SequencedData: marshalledAllocation,
+ },
+ }
+ seqData1 := &sequencerblockv1.RollupData{
+ Value: &sequencerblockv1.RollupData_SequencedData{
+ SequencedData: validMarshalledTx4,
+ },
+ }
+ seqData2 := &sequencerblockv1.RollupData{
+ Value: &sequencerblockv1.RollupData_SequencedData{
+ SequencedData: validMarshalledTx5,
+ },
+ }
+
+ bridgeAddress := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].BridgeAddress
+ bridgeAssetDenom := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].AssetDenom
+ chainDestinationKey, err := crypto.GenerateKey()
+ require.Nil(t, err, "failed to generate chain destination key: %v", err)
+ chainDestinationAddress := crypto.PubkeyToAddress(chainDestinationKey.PublicKey)
+
+ depositTx := &sequencerblockv1.RollupData{Value: &sequencerblockv1.RollupData_Deposit{Deposit: &sequencerblockv1.Deposit{
+ BridgeAddress: &primitivev1.Address{
+ Bech32M: bridgeAddress,
+ },
+ Asset: bridgeAssetDenom,
+ Amount: bigIntToProtoU128(big.NewInt(1000000000000000000)),
+ RollupId: &primitivev1.RollupId{Inner: make([]byte, 0)},
+ DestinationChainAddress: chainDestinationAddress.String(),
+ SourceTransactionId: &primitivev1.TransactionId{
+ Inner: "test_tx_hash",
+ },
+ SourceActionIndex: 0,
+ }}}
+
+ finalTxs := []*sequencerblockv1.RollupData{seqData1, seqData2, allocationSequenceData, allocationSequenceData2, depositTx}
+
+ txsToProcess := UnbundleRollupDataTransactions(finalTxs, 2, serviceV1Alpha1.BridgeAddresses(), serviceV1Alpha1.BridgeAllowedAssets(), prevRollupBlockHash, serviceV1Alpha1.AuctioneerAddress(), addressPrefix)
+
+ require.Equal(t, txsToProcess.Len(), 6, "expected 6 txs to process")
+
+ // allocation txs should be the first 3
+ require.True(t, bytes.Equal(txsToProcess[0].Hash().Bytes(), tx1.Hash().Bytes()), "expected tx1 to be first")
+ require.True(t, bytes.Equal(txsToProcess[1].Hash().Bytes(), tx2.Hash().Bytes()), "expected tx2 to be second")
+ require.True(t, bytes.Equal(txsToProcess[2].Hash().Bytes(), tx3.Hash().Bytes()), "expected tx3 to be third")
+ require.True(t, bytes.Equal(txsToProcess[3].Hash().Bytes(), tx4.Hash().Bytes()), "expected tx4 to be fourth")
+ require.True(t, bytes.Equal(txsToProcess[4].Hash().Bytes(), tx5.Hash().Bytes()), "expected tx5 to be fifth")
+}
+
+func TestUnbundleRollupDataWithDuplicateInvalidAllocations(t *testing.T) {
+ ethservice, serviceV1Alpha1, auctioneerPrivKey, auctioneerPubKey := SetupSharedService(t, 10)
+ addressPrefix := ethservice.BlockChain().Config().AstriaSequencerAddressPrefix
+
+ baseSequencerBlockHash := []byte("sequencer block hash")
+ prevRollupBlockHash := []byte("prev rollup block hash")
+
+ _, invalidAuctioneerPrivKey, err := ed25519.GenerateKey(nil)
+ require.Nil(t, err, "failed to generate invalid auctioneer key: %v", err)
+
+ // txs in
+ tx1 := transaction(0, 1000, TestKey)
+ tx2 := transaction(1, 1000, TestKey)
+ tx3 := transaction(2, 1000, TestKey)
+ tx4 := transaction(3, 1000, TestKey)
+ tx5 := transaction(4, 1000, TestKey)
+
+ validMarshalledTx1, err := tx1.MarshalBinary()
+ require.NoError(t, err, "failed to marshal valid tx: %v", err)
+ validMarshalledTx2, err := tx2.MarshalBinary()
+ require.NoError(t, err, "failed to marshal valid tx: %v", err)
+ validMarshalledTx3, err := tx3.MarshalBinary()
+ require.NoError(t, err, "failed to marshal valid tx: %v", err)
+ validMarshalledTx4, err := tx4.MarshalBinary()
+ require.NoError(t, err, "failed to marshal valid tx: %v", err)
+ validMarshalledTx5, err := tx5.MarshalBinary()
+ require.NoError(t, err, "failed to marshal valid tx: %v", err)
+
+ // transactions that the attacker is trying to get into the top of block
+ invalidTx1 := transaction(5, 1000, TestKey)
+ invalidMarshalledTx1, err := invalidTx1.MarshalBinary()
+ require.NoError(t, err, "failed to marshal valid tx: %v", err)
+ invalidTx2 := transaction(6, 1000, TestKey)
+ invalidMarshalledTx2, err := invalidTx2.MarshalBinary()
+ require.NoError(t, err, "failed to marshal valid tx: %v", err)
+
+ bid := &auctionv1alpha1.Bid{
+ Fee: 100,
+ Transactions: [][]byte{validMarshalledTx1, validMarshalledTx2, validMarshalledTx3},
+ SequencerParentBlockHash: baseSequencerBlockHash,
+ RollupParentBlockHash: prevRollupBlockHash,
+ }
+ validBidAny, err := anypb.New(bid)
+ require.NoError(t, err, "failed to convert bid to anypb: %v", err)
+
+ marshalledBid, err := proto.Marshal(bid)
+ require.NoError(t, err, "failed to marshal allocation: %v", err)
+ signedBid, err := auctioneerPrivKey.Sign(nil, marshalledBid, &ed25519.Options{
+ Hash: 0,
+ Context: "",
+ })
+ require.NoError(t, err, "failed to sign allocation: %v", err)
+
+ invalidBid := &auctionv1alpha1.Bid{
+ Fee: 100,
+ Transactions: [][]byte{invalidMarshalledTx1, invalidMarshalledTx2},
+ SequencerParentBlockHash: baseSequencerBlockHash,
+ RollupParentBlockHash: prevRollupBlockHash,
+ }
+ invalidBidAny, err := anypb.New(invalidBid)
+ require.NoError(t, err, "failed to convert bid to anypb: %v", err)
+
+ marshalledInvalidBid, err := proto.Marshal(invalidBid)
+ require.NoError(t, err, "failed to marshal invalid allocation: %v", err)
+
+ signedInvalidBid, err := invalidAuctioneerPrivKey.Sign(nil, marshalledInvalidBid, &ed25519.Options{
+ Hash: 0,
+ Context: "",
+ })
+ require.NoError(t, err, "failed to sign allocation: %v", err)
+
+ allocation := &auctionv1alpha1.Allocation{
+ Signature: signedBid,
+ PublicKey: auctioneerPubKey,
+ Bid: validBidAny,
+ }
+
+ marshalledAllocation, err := proto.Marshal(allocation)
+ require.NoError(t, err, "failed to marshal allocation: %v", err)
+
+ invalidAllocation := &auctionv1alpha1.Allocation{
+ Signature: signedInvalidBid,
+ // trying to spoof the actual auctioneer key
+ PublicKey: auctioneerPubKey,
+ Bid: invalidBidAny,
+ }
+ marshalledInvalidAllocation, err := proto.Marshal(invalidAllocation)
+ require.NoError(t, err, "failed to marshal invalid allocation: %v", err)
+
+ allocationSequenceData := &sequencerblockv1.RollupData{
+ Value: &sequencerblockv1.RollupData_SequencedData{
+ SequencedData: marshalledAllocation,
+ },
+ }
+ // this allocation should be ignored
+ invalidAllocationSequenceData := &sequencerblockv1.RollupData{
+ Value: &sequencerblockv1.RollupData_SequencedData{
+ SequencedData: marshalledInvalidAllocation,
+ },
+ }
+ seqData1 := &sequencerblockv1.RollupData{
+ Value: &sequencerblockv1.RollupData_SequencedData{
+ SequencedData: validMarshalledTx4,
+ },
+ }
+ seqData2 := &sequencerblockv1.RollupData{
+ Value: &sequencerblockv1.RollupData_SequencedData{
+ SequencedData: validMarshalledTx5,
+ },
+ }
+
+ bridgeAddress := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].BridgeAddress
+ bridgeAssetDenom := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].AssetDenom
+ chainDestinationKey, err := crypto.GenerateKey()
+ require.Nil(t, err, "failed to generate chain destination key: %v", err)
+ chainDestinationAddress := crypto.PubkeyToAddress(chainDestinationKey.PublicKey)
+
+ depositTx := &sequencerblockv1.RollupData{Value: &sequencerblockv1.RollupData_Deposit{Deposit: &sequencerblockv1.Deposit{
+ BridgeAddress: &primitivev1.Address{
+ Bech32M: bridgeAddress,
+ },
+ Asset: bridgeAssetDenom,
+ Amount: bigIntToProtoU128(big.NewInt(1000000000000000000)),
+ RollupId: &primitivev1.RollupId{Inner: make([]byte, 0)},
+ DestinationChainAddress: chainDestinationAddress.String(),
+ SourceTransactionId: &primitivev1.TransactionId{
+ Inner: "test_tx_hash",
+ },
+ SourceActionIndex: 0,
+ }}}
+
+ finalTxs := []*sequencerblockv1.RollupData{seqData1, seqData2, allocationSequenceData, invalidAllocationSequenceData, depositTx}
+
+ txsToProcess := UnbundleRollupDataTransactions(finalTxs, 2, serviceV1Alpha1.BridgeAddresses(), serviceV1Alpha1.BridgeAllowedAssets(), prevRollupBlockHash, serviceV1Alpha1.AuctioneerAddress(), addressPrefix)
+
+ require.Equal(t, txsToProcess.Len(), 6, "expected 6 txs to process")
+
+ // allocation txs should be the first 3
+ require.True(t, bytes.Equal(txsToProcess[0].Hash().Bytes(), tx1.Hash().Bytes()), "expected tx1 to be first")
+ require.True(t, bytes.Equal(txsToProcess[1].Hash().Bytes(), tx2.Hash().Bytes()), "expected tx2 to be second")
+ require.True(t, bytes.Equal(txsToProcess[2].Hash().Bytes(), tx3.Hash().Bytes()), "expected tx3 to be third")
+ require.True(t, bytes.Equal(txsToProcess[3].Hash().Bytes(), tx4.Hash().Bytes()), "expected tx4 to be fourth")
+ require.True(t, bytes.Equal(txsToProcess[4].Hash().Bytes(), tx5.Hash().Bytes()), "expected tx5 to be fifth")
+}
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index ffa20fb62..7086f8156 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -834,6 +834,7 @@ func (s *BlockChainAPI) GetHeaderByHash(ctx context.Context, hash common.Hash) m
// - When blockNr is -2 the chain latest block is returned.
// - When blockNr is -3 the chain finalized block is returned.
// - When blockNr is -4 the chain safe block is returned.
+// - When blockNr is -5 the chain optimistic block is returned.
// - When fullTx is true all transactions in the block are returned, otherwise
// only the transaction hash is returned.
func (s *BlockChainAPI) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber, fullTx bool) (map[string]interface{}, error) {
diff --git a/miner/miner_test.go b/miner/miner_test.go
index 3dc39f175..7cd74cb70 100644
--- a/miner/miner_test.go
+++ b/miner/miner_test.go
@@ -59,11 +59,12 @@ func (m *mockBackend) TxPool() *txpool.TxPool {
}
type testBlockChain struct {
- root common.Hash
- config *params.ChainConfig
- statedb *state.StateDB
- gasLimit uint64
- chainHeadFeed *event.Feed
+ root common.Hash
+ config *params.ChainConfig
+ statedb *state.StateDB
+ gasLimit uint64
+ chainHeadFeed *event.Feed
+ chainOptimisticHeadFeed *event.Feed
}
func (bc *testBlockChain) Config() *params.ChainConfig {
@@ -94,7 +95,7 @@ func (bc *testBlockChain) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent)
}
func (bc *testBlockChain) SubscribeChainOptimisticHeadEvent(ch chan<- core.ChainOptimisticHeadEvent) event.Subscription {
- return bc.chainHeadFeed.Subscribe(ch)
+ return bc.chainOptimisticHeadFeed.Subscribe(ch)
}
func TestBuildPendingBlocks(t *testing.T) {
@@ -161,10 +162,10 @@ func createMiner(t *testing.T) *Miner {
t.Fatalf("can't create new chain %v", err)
}
statedb, _ := state.New(bc.Genesis().Root(), bc.StateCache(), nil)
- blockchain := &testBlockChain{bc.Genesis().Root(), chainConfig, statedb, 10000000, new(event.Feed)}
+ blockchain := &testBlockChain{bc.Genesis().Root(), chainConfig, statedb, 10000000, new(event.Feed), new(event.Feed)}
- pool := legacypool.New(testTxPoolConfig, blockchain)
- txpool, _ := txpool.New(testTxPoolConfig.PriceLimit, blockchain, []txpool.SubPool{pool})
+ pool := legacypool.New(testTxPoolConfig, blockchain, true)
+ txpool, _ := txpool.New(testTxPoolConfig.PriceLimit, blockchain, []txpool.SubPool{pool}, true)
// Create Miner
backend := NewMockBackend(bc, txpool)
diff --git a/miner/payload_building_test.go b/miner/payload_building_test.go
index 3ba7b6ccc..8e7f46e3a 100644
--- a/miner/payload_building_test.go
+++ b/miner/payload_building_test.go
@@ -124,8 +124,8 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine
if err != nil {
t.Fatalf("core.NewBlockChain failed: %v", err)
}
- pool := legacypool.New(testTxPoolConfig, chain)
- txpool, _ := txpool.New(testTxPoolConfig.PriceLimit, chain, []txpool.SubPool{pool})
+ pool := legacypool.New(testTxPoolConfig, chain, true)
+ txpool, _ := txpool.New(testTxPoolConfig.PriceLimit, chain, []txpool.SubPool{pool}, true)
return &testWorkerBackend{
db: db,
diff --git a/node/config.go b/node/config.go
index d1e29baa0..11a54c1e6 100644
--- a/node/config.go
+++ b/node/config.go
@@ -217,6 +217,8 @@ type Config struct {
EnablePersonal bool `toml:"-"`
DBEngine string `toml:",omitempty"`
+
+ EnableAuctioneer bool `toml:",omitempty"`
}
// IPCEndpoint resolves an IPC endpoint based on a configured value, taking into
@@ -273,7 +275,7 @@ func (c *Config) HTTPEndpoint() string {
return net.JoinHostPort(c.HTTPHost, fmt.Sprintf("%d", c.HTTPPort))
}
-// GRPCEndpoint resolves a gRPC endpoint based on the configured host interface
+// GRPCEndpoint resolves a gRPC TCP endpoint based on the configured host interface
// and port parameters.
func (c *Config) GRPCEndpoint() string {
if c.GRPCHost == "" {
diff --git a/node/defaults.go b/node/defaults.go
index 326ed2373..fd3d45e01 100644
--- a/node/defaults.go
+++ b/node/defaults.go
@@ -35,8 +35,8 @@ const (
DefaultAuthHost = "localhost" // Default host interface for the authenticated apis
DefaultAuthPort = 8551 // Default port for the authenticated apis
// grpc
- DefaultGRPCHost = "[::1]" // Default host interface for the gRPC server
- DefaultGRPCPort = 50051 // Default port for the gRPC server
+ DefaultGRPCHost = "[::1]" // Default host interface for the gRPC server for the execution api
+ DefaultGRPCPort = 50051 // Default port for the gRPC server for the execution api
)
const (
diff --git a/node/grpcstack.go b/node/grpcstack.go
index 86ebc8b5f..15000f9ab 100644
--- a/node/grpcstack.go
+++ b/node/grpcstack.go
@@ -1,6 +1,7 @@
package node
import (
+ optimisticGrpc "buf.build/gen/go/astria/execution-apis/grpc/go/astria/auction/v1alpha1/auctionv1alpha1grpc"
"net"
"sync"
@@ -15,25 +16,36 @@ type GRPCServerHandler struct {
mu sync.Mutex
endpoint string
- server *grpc.Server
+ execServer *grpc.Server
executionServiceServerV1a2 *astriaGrpc.ExecutionServiceServer
+ optimisticExecServ *optimisticGrpc.OptimisticExecutionServiceServer
+ auctionServiceServ *optimisticGrpc.AuctionServiceServer
+
+ enableAuctioneer bool
}
// NewServer creates a new gRPC server.
// It registers the execution service server.
// It registers the gRPC server with the node so it can be stopped on shutdown.
-func NewGRPCServerHandler(node *Node, execServ astriaGrpc.ExecutionServiceServer, cfg *Config) error {
- server := grpc.NewServer()
+func NewGRPCServerHandler(node *Node, execServ astriaGrpc.ExecutionServiceServer, optimisticExecServ optimisticGrpc.OptimisticExecutionServiceServer, auctionServiceServ optimisticGrpc.AuctionServiceServer, cfg *Config) error {
+ execServer := grpc.NewServer()
log.Info("gRPC server enabled", "endpoint", cfg.GRPCEndpoint())
serverHandler := &GRPCServerHandler{
endpoint: cfg.GRPCEndpoint(),
- server: server,
+ execServer: execServer,
executionServiceServerV1a2: &execServ,
+ optimisticExecServ: &optimisticExecServ,
+ auctionServiceServ: &auctionServiceServ,
+ enableAuctioneer: cfg.EnableAuctioneer,
}
- astriaGrpc.RegisterExecutionServiceServer(server, execServ)
+ astriaGrpc.RegisterExecutionServiceServer(execServer, execServ)
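+ // the optimistic execution and auction services are only registered when the auctioneer is
+ // enabled; they share the same gRPC server and TCP listener as the execution API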
+ if cfg.EnableAuctioneer {
+ optimisticGrpc.RegisterOptimisticExecutionServiceServer(execServer, optimisticExecServ)
+ optimisticGrpc.RegisterAuctionServiceServer(execServer, auctionServiceServ)
+ }
node.RegisterGRPCServer(serverHandler)
return nil
@@ -49,11 +61,13 @@ func (handler *GRPCServerHandler) Start() error {
}
// Start the gRPC server
- lis, err := net.Listen("tcp", handler.endpoint)
+ tcpLis, err := net.Listen("tcp", handler.endpoint)
if err != nil {
return err
}
- go handler.server.Serve(lis)
+
+ go handler.execServer.Serve(tcpLis)
+
log.Info("gRPC server started", "endpoint", handler.endpoint)
return nil
}
@@ -63,7 +77,8 @@ func (handler *GRPCServerHandler) Stop() error {
handler.mu.Lock()
defer handler.mu.Unlock()
- handler.server.GracefulStop()
+ handler.execServer.GracefulStop()
+
log.Info("gRPC server stopped", "endpoint", handler.endpoint)
return nil
}
diff --git a/node/node.go b/node/node.go
index 896763033..0b19df5db 100644
--- a/node/node.go
+++ b/node/node.go
@@ -69,6 +69,8 @@ type Node struct {
// grpc
grpcServerHandler *GRPCServerHandler // Stores information about the grpc server
+ enableAuctioneer bool
+
databases map[*closeTrackingDB]struct{} // All open databases
}
@@ -159,6 +161,10 @@ func New(conf *Config) (*Node, error) {
node.wsAuth = newHTTPServer(node.log, rpc.DefaultHTTPTimeouts)
node.ipc = newIPCServer(node.log, conf.IPCEndpoint())
+ if conf.EnableAuctioneer {
+ node.enableAuctioneer = true
+ }
+
return node, nil
}
@@ -724,7 +730,7 @@ func (n *Node) HTTPEndpoint() string {
return "http://" + n.http.listenAddr()
}
-// GRPCENDPOINT returns the URL of the GRPC server.
+// GRPCEndpoint returns the URL of the GRPC server.
func (n *Node) GRPCEndpoint() string {
return "http://" + n.grpcServerHandler.endpoint
}
@@ -756,6 +762,10 @@ func (n *Node) EventMux() *event.TypeMux {
return n.eventmux
}
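+// AuctioneerEnabled reports whether the auctioneer gRPC services are enabled on this node.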
+func (n *Node) AuctioneerEnabled() bool {
+ return n.enableAuctioneer
+}
+
// OpenDatabase opens an existing database with the given name (or creates one if no
// previous can be found) from within the node's instance directory. If the node is
// ephemeral, a memory database is returned.
diff --git a/params/config.go b/params/config.go
index d9b40b881..7d9ee2a79 100644
--- a/params/config.go
+++ b/params/config.go
@@ -388,6 +388,7 @@ type ChainConfig struct {
AstriaBridgeAddressConfigs []AstriaBridgeAddressConfig `json:"astriaBridgeAddresses,omitempty"`
AstriaFeeCollectors map[uint32]common.Address `json:"astriaFeeCollectors"`
AstriaEIP1559Params *AstriaEIP1559Params `json:"astriaEIP1559Params,omitempty"`
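+ // the auctioneer's bech32m address keyed by the height at which it becomes active (assumed to mirror the height-keyed scheme used by AstriaFeeCollectors)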
+ AstriaAuctioneerAddresses map[uint32]string `json:"astriaAuctioneerAddresses,omitempty"`
}
func (c *ChainConfig) AstriaExtraData() []byte {
diff --git a/rpc/types.go b/rpc/types.go
index 2e53174b8..249efc51a 100644
--- a/rpc/types.go
+++ b/rpc/types.go
@@ -63,11 +63,12 @@ type jsonWriter interface {
type BlockNumber int64
const (
- SafeBlockNumber = BlockNumber(-4)
- FinalizedBlockNumber = BlockNumber(-3)
- LatestBlockNumber = BlockNumber(-2)
- PendingBlockNumber = BlockNumber(-1)
- EarliestBlockNumber = BlockNumber(0)
+ OptimisticBlockNumber = BlockNumber(-5)
+ SafeBlockNumber = BlockNumber(-4)
+ FinalizedBlockNumber = BlockNumber(-3)
+ LatestBlockNumber = BlockNumber(-2)
+ PendingBlockNumber = BlockNumber(-1)
+ EarliestBlockNumber = BlockNumber(0)
)
// UnmarshalJSON parses the given JSON fragment into a BlockNumber. It supports:
@@ -98,6 +99,9 @@ func (bn *BlockNumber) UnmarshalJSON(data []byte) error {
case "safe":
*bn = SafeBlockNumber
return nil
+ case "optimistic":
+ *bn = OptimisticBlockNumber
+ return nil
}
blckNum, err := hexutil.DecodeUint64(input)
@@ -135,6 +139,8 @@ func (bn BlockNumber) String() string {
return "finalized"
case SafeBlockNumber:
return "safe"
+ case OptimisticBlockNumber:
+ return "optimistic"
default:
if bn < 0 {
return fmt.Sprintf("<invalid %d>", bn)
@@ -188,6 +194,10 @@ func (bnh *BlockNumberOrHash) UnmarshalJSON(data []byte) error {
bn := SafeBlockNumber
bnh.BlockNumber = &bn
return nil
+ case "optimistic":
+ bn := OptimisticBlockNumber
+ bnh.BlockNumber = &bn
+ return nil
default:
if len(input) == 66 {
hash := common.Hash{}