diff --git a/.github/workflows/astria-build-and-publish-image.yml b/.github/workflows/astria-build-and-publish-image.yml index 73654354e..3329474bb 100644 --- a/.github/workflows/astria-build-and-publish-image.yml +++ b/.github/workflows/astria-build-and-publish-image.yml @@ -72,4 +72,4 @@ jobs: push: true tags: ${{ steps.metadata.outputs.tags }} labels: ${{ steps.metadata.outputs.labels }} - project: w2d6w0spqz \ No newline at end of file + project: w2d6w0spqz diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 162655190..a417a9753 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -123,6 +123,7 @@ var ( utils.MinerRecommitIntervalFlag, utils.MinerPendingFeeRecipientFlag, utils.MinerNewPayloadTimeoutFlag, // deprecated + utils.AuctioneerEnabledFlag, utils.NATFlag, utils.NoDiscoverFlag, utils.DiscoveryV4Flag, diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 4d55de18f..f2410a9ce 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -18,7 +18,7 @@ package utils import ( - optimisticGrpc "buf.build/gen/go/astria/execution-apis/grpc/go/astria/bundle/v1alpha1/bundlev1alpha1grpc" + optimisticGrpc "buf.build/gen/go/astria/execution-apis/grpc/go/astria/auction/v1alpha1/auctionv1alpha1grpc" "context" "crypto/ecdsa" "encoding/hex" @@ -770,6 +770,13 @@ var ( Category: flags.APICategory, } + // auctioneer + AuctioneerEnabledFlag = &cli.BoolFlag{ + Name: "auctioneer", + Usage: "Enable the auctioneer server", + Category: flags.MinerCategory, + } + // Network Settings MaxPeersFlag = &cli.IntFlag{ Name: "maxpeers", @@ -1439,6 +1446,12 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) { SetDataDir(ctx, cfg) setSmartCard(ctx, cfg) + if ctx.Bool(AuctioneerEnabledFlag.Name) { + cfg.EnableAuctioneer = true + } else { + cfg.EnableAuctioneer = false + } + if ctx.IsSet(JWTSecretFlag.Name) { cfg.JWTSecret = ctx.String(JWTSecretFlag.Name) } @@ -1990,8 +2003,8 @@ func RegisterGraphQLService(stack *node.Node, backend ethapi.Backend, filterSyst // 
RegisterGRPCServices adds the gRPC API to the node. // It was done this way so that our grpc execution server can access the ethapi.Backend -func RegisterGRPCServices(stack *node.Node, execServ astriaGrpc.ExecutionServiceServer, optimisticExecutionServ optimisticGrpc.OptimisticExecutionServiceServer, bundleStreamingServ optimisticGrpc.BundleServiceServer, cfg *node.Config) { - if err := node.NewGRPCServerHandler(stack, execServ, optimisticExecutionServ, bundleStreamingServ, cfg); err != nil { +func RegisterGRPCServices(stack *node.Node, execServ astriaGrpc.ExecutionServiceServer, optimisticExecutionServ optimisticGrpc.OptimisticExecutionServiceServer, auctionServiceServer optimisticGrpc.AuctionServiceServer, cfg *node.Config) { + if err := node.NewGRPCServerHandler(stack, execServ, optimisticExecutionServ, auctionServiceServer, cfg); err != nil { Fatalf("Failed to register the gRPC service: %v", err) } } diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index b6e769452..b1397292a 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -239,6 +239,8 @@ type LegacyPool struct { initDoneCh chan struct{} // is closed once the pool is initialized (for tests) changesSinceReorg int // A counter for how many drops we've performed in-between reorg. + + auctioneerEnabled bool } type txpoolResetRequest struct { @@ -247,26 +249,27 @@ type txpoolResetRequest struct { // New creates a new transaction pool to gather, sort and filter inbound // transactions from the network. 
-func New(config Config, chain BlockChain) *LegacyPool { +func New(config Config, chain BlockChain, auctioneerEnabled bool) *LegacyPool { // Sanitize the input to ensure no vulnerable gas prices are set config = (&config).sanitize() // Create the transaction pool with its initial settings pool := &LegacyPool{ - config: config, - chain: chain, - chainconfig: chain.Config(), - signer: types.LatestSigner(chain.Config()), - pending: make(map[common.Address]*list), - queue: make(map[common.Address]*list), - beats: make(map[common.Address]time.Time), - all: newLookup(), - reqResetCh: make(chan *txpoolResetRequest), - reqPromoteCh: make(chan *accountSet), - queueTxEventCh: make(chan *types.Transaction), - reorgDoneCh: make(chan chan struct{}), - reorgShutdownCh: make(chan struct{}), - initDoneCh: make(chan struct{}), + config: config, + chain: chain, + chainconfig: chain.Config(), + signer: types.LatestSigner(chain.Config()), + pending: make(map[common.Address]*list), + queue: make(map[common.Address]*list), + beats: make(map[common.Address]time.Time), + all: newLookup(), + reqResetCh: make(chan *txpoolResetRequest), + reqPromoteCh: make(chan *accountSet), + queueTxEventCh: make(chan *types.Transaction), + reorgDoneCh: make(chan chan struct{}), + reorgShutdownCh: make(chan struct{}), + initDoneCh: make(chan struct{}), + auctioneerEnabled: auctioneerEnabled, } pool.locals = newAccountSet(pool.signer) for _, addr := range config.Locals { @@ -1373,8 +1376,16 @@ func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest, } pool.mu.Lock() if reset != nil { - // Reset from the old head to the new, rescheduling any reorged transactions - pool.reset(reset.oldHead, reset.newHead) + // only reset the state root and the head of the txpool when we are running the auctioneer node. 
+ // when we are not running the auctioneer node, we re-inject any re-orged transactions which is similar + // to the current functionality of geth + if pool.auctioneerEnabled { + // only reset from the old head to the new head + pool.resetHeadOnly(reset.oldHead, reset.newHead) + } else { + // Reset from the old head to the new, rescheduling any reorged transactions + pool.reset(reset.oldHead, reset.newHead) + } // Nonces were reset, discard any events that became stale for addr := range events { @@ -1395,7 +1406,13 @@ func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest, // remove any transaction that has been included in the block or was invalidated // because of another transaction (e.g. higher gas price). if reset != nil { - pool.clearPendingAndQueued(reset.newHead) + if pool.auctioneerEnabled { + // if we are running the pool as an auctioneer, then we should clear the mempool each time the head + // is reset + pool.clearPendingAndQueued(reset.newHead) + } else { + pool.demoteUnexecutables() + } if reset.newHead != nil { if pool.chainconfig.IsLondon(new(big.Int).Add(reset.newHead.Number, big.NewInt(1))) { pendingBaseFee := eip1559.CalcBaseFee(pool.chainconfig, reset.newHead) @@ -1765,9 +1782,12 @@ func (pool *LegacyPool) truncateQueue() { // it assumes that the pool lock is being held func (pool *LegacyPool) clearPendingAndQueued(newHead *types.Header) { // Iterate over all accounts and demote any non-executable transactions + addrsForWhichTxsRemoved := map[common.Address]bool{} + for addr, list := range pool.pending { dropped, invalids := list.ClearList() - pendingGauge.Dec(int64(len(dropped) + len(invalids))) + + pendingGauge.Dec(int64(dropped.Len() + invalids.Len())) for _, tx := range dropped { pool.all.Remove(tx.Hash()) @@ -1779,12 +1799,14 @@ func (pool *LegacyPool) clearPendingAndQueued(newHead *types.Header) { if list.Empty() { delete(pool.pending, addr) delete(pool.beats, addr) + + addrsForWhichTxsRemoved[addr] = true } } for 
addr, list := range pool.queue { dropped, invalids := list.ClearList() - queuedGauge.Dec(int64(len(dropped) + len(invalids))) + queuedGauge.Dec(int64(dropped.Len() + invalids.Len())) for _, tx := range dropped { pool.all.Remove(tx.Hash()) @@ -1794,12 +1816,15 @@ func (pool *LegacyPool) clearPendingAndQueued(newHead *types.Header) { } if list.Empty() { - if _, ok := pool.queue[addr]; !ok { - pool.reserve(addr, false) - } delete(pool.queue, addr) + + addrsForWhichTxsRemoved[addr] = true } } + + for addr := range addrsForWhichTxsRemoved { + pool.reserve(addr, false) + } } // demoteUnexecutables removes invalid and processed transactions from the pools diff --git a/core/txpool/legacypool/legacypool2_test.go b/core/txpool/legacypool/legacypool2_test.go index fd961d1d9..d0e1d0e04 100644 --- a/core/txpool/legacypool/legacypool2_test.go +++ b/core/txpool/legacypool/legacypool2_test.go @@ -85,7 +85,7 @@ func TestTransactionFutureAttack(t *testing.T) { config := testTxPoolConfig config.GlobalQueue = 100 config.GlobalSlots = 100 - pool := New(config, blockchain) + pool := New(config, blockchain, true) pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() fillPool(t, pool) @@ -119,7 +119,7 @@ func TestTransactionFuture1559(t *testing.T) { // Create the pool to test the pricing enforcement with statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed)) - pool := New(testTxPoolConfig, blockchain) + pool := New(testTxPoolConfig, blockchain, true) pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() @@ -152,7 +152,7 @@ func TestTransactionZAttack(t *testing.T) { // Create the pool to test the pricing enforcement with statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) blockchain := newTestBlockChain(eip1559Config, 1000000, 
statedb, new(event.Feed)) - pool := New(testTxPoolConfig, blockchain) + pool := New(testTxPoolConfig, blockchain, true) pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() // Create a number of test accounts, fund them and make transactions @@ -223,7 +223,7 @@ func BenchmarkFutureAttack(b *testing.B) { config := testTxPoolConfig config.GlobalQueue = 100 config.GlobalSlots = 100 - pool := New(config, blockchain) + pool := New(config, blockchain, true) pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() fillPool(b, pool) diff --git a/core/txpool/legacypool/legacypool_no_auctioneer_test.go b/core/txpool/legacypool/legacypool_no_auctioneer_test.go new file mode 100644 index 000000000..1f70b5334 --- /dev/null +++ b/core/txpool/legacypool/legacypool_no_auctioneer_test.go @@ -0,0 +1,2534 @@ +// Copyright 2015 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
+ +package legacypool + +import ( + "crypto/ecdsa" + "errors" + "math/big" + "math/rand" + "os" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/tracing" + "github.com/ethereum/go-ethereum/core/txpool" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/params" + "github.com/holiman/uint256" +) + +func init() { + testTxPoolConfig = DefaultConfig + testTxPoolConfig.Journal = "" + + cpy := *params.TestChainConfig + eip1559Config = &cpy + eip1559Config.BerlinBlock = common.Big0 + eip1559Config.LondonBlock = common.Big0 +} + +// This test simulates a scenario where a new block is imported during a +// state reset and tests whether the pending state is in sync with the +// block head event that initiated the resetState(). 
+func TestStateChangeDuringResetNoAuctioneer(t *testing.T) { + t.Parallel() + + var ( + key, _ = crypto.GenerateKey() + address = crypto.PubkeyToAddress(key.PublicKey) + statedb, _ = state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + trigger = false + ) + + // setup pool with 2 transaction in it + statedb.SetBalance(address, new(uint256.Int).SetUint64(params.Ether), tracing.BalanceChangeUnspecified) + blockchain := &testChain{newTestBlockChain(params.TestChainConfig, 1000000000, statedb, new(event.Feed)), address, &trigger} + + tx0 := transaction(0, 100000, key) + tx1 := transaction(1, 100000, key) + + pool := New(testTxPoolConfig, blockchain, false) + pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() + + nonce := pool.Nonce(address) + if nonce != 0 { + t.Fatalf("Invalid nonce, want 0, got %d", nonce) + } + + pool.addRemotesSync([]*types.Transaction{tx0, tx1}) + + nonce = pool.Nonce(address) + if nonce != 2 { + t.Fatalf("Invalid nonce, want 2, got %d", nonce) + } + + // trigger state change in the background + trigger = true + <-pool.requestReset(nil, nil) + + nonce = pool.Nonce(address) + if nonce != 2 { + t.Fatalf("Invalid nonce, want 2, got %d", nonce) + } +} + +func TestInvalidTransactionsNoAuctioneer(t *testing.T) { + t.Parallel() + + pool, key := setupPool(false) + defer pool.Close() + + tx := transaction(0, 100, key) + from, _ := deriveSender(tx) + + // Intrinsic gas too low + testAddBalance(pool, from, big.NewInt(1)) + if err, want := pool.addRemote(tx), core.ErrIntrinsicGas; !errors.Is(err, want) { + t.Errorf("want %v have %v", want, err) + } + + // Insufficient funds + tx = transaction(0, 100000, key) + if err, want := pool.addRemote(tx), core.ErrInsufficientFunds; !errors.Is(err, want) { + t.Errorf("want %v have %v", want, err) + } + + testSetNonce(pool, from, 1) + testAddBalance(pool, from, big.NewInt(0xffffffffffffff)) + tx = transaction(0, 100000, key) + if 
err, want := pool.addRemote(tx), core.ErrNonceTooLow; !errors.Is(err, want) { + t.Errorf("want %v have %v", want, err) + } + + tx = transaction(1, 100000, key) + pool.gasTip.Store(uint256.NewInt(1000)) + if err, want := pool.addRemote(tx), txpool.ErrUnderpriced; !errors.Is(err, want) { + t.Errorf("want %v have %v", want, err) + } + if err := pool.addLocal(tx); err != nil { + t.Error("expected", nil, "got", err) + } +} + +func TestQueueNoAuctioneer(t *testing.T) { + t.Parallel() + + pool, key := setupPool(false) + defer pool.Close() + + tx := transaction(0, 100, key) + from, _ := deriveSender(tx) + testAddBalance(pool, from, big.NewInt(1000)) + <-pool.requestReset(nil, nil) + + pool.enqueueTx(tx.Hash(), tx, false, true) + <-pool.requestPromoteExecutables(newAccountSet(pool.signer, from)) + if len(pool.pending) != 1 { + t.Error("expected valid txs to be 1 is", len(pool.pending)) + } + + tx = transaction(1, 100, key) + from, _ = deriveSender(tx) + testSetNonce(pool, from, 2) + pool.enqueueTx(tx.Hash(), tx, false, true) + + <-pool.requestPromoteExecutables(newAccountSet(pool.signer, from)) + if _, ok := pool.pending[from].txs.items[tx.Nonce()]; ok { + t.Error("expected transaction to be in tx pool") + } + if len(pool.queue) > 0 { + t.Error("expected transaction queue to be empty. 
is", len(pool.queue)) + } +} + +func TestQueue2NoAuctioneer(t *testing.T) { + t.Parallel() + + pool, key := setupPool(false) + defer pool.Close() + + tx1 := transaction(0, 100, key) + tx2 := transaction(10, 100, key) + tx3 := transaction(11, 100, key) + from, _ := deriveSender(tx1) + testAddBalance(pool, from, big.NewInt(1000)) + pool.reset(nil, nil) + + pool.enqueueTx(tx1.Hash(), tx1, false, true) + pool.enqueueTx(tx2.Hash(), tx2, false, true) + pool.enqueueTx(tx3.Hash(), tx3, false, true) + + pool.promoteExecutables([]common.Address{from}) + if len(pool.pending) != 1 { + t.Error("expected pending length to be 1, got", len(pool.pending)) + } + if pool.queue[from].Len() != 2 { + t.Error("expected len(queue) == 2, got", pool.queue[from].Len()) + } +} + +func TestNegativeValueNoAuctioneer(t *testing.T) { + t.Parallel() + + pool, key := setupPool(false) + defer pool.Close() + + tx, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(-1), 100, big.NewInt(1), nil), types.HomesteadSigner{}, key) + from, _ := deriveSender(tx) + testAddBalance(pool, from, big.NewInt(1)) + if err := pool.addRemote(tx); err != txpool.ErrNegativeValue { + t.Error("expected", txpool.ErrNegativeValue, "got", err) + } +} + +func TestTipAboveFeeCapNoAuctioneer(t *testing.T) { + t.Parallel() + + pool, key := setupPoolWithConfig(eip1559Config, false) + defer pool.Close() + + tx := dynamicFeeTx(0, 100, big.NewInt(1), big.NewInt(2), key) + + if err := pool.addRemote(tx); err != core.ErrTipAboveFeeCap { + t.Error("expected", core.ErrTipAboveFeeCap, "got", err) + } +} + +func TestVeryHighValuesNoAuctioneer(t *testing.T) { + t.Parallel() + + pool, key := setupPoolWithConfig(eip1559Config, false) + defer pool.Close() + + veryBigNumber := big.NewInt(1) + veryBigNumber.Lsh(veryBigNumber, 300) + + tx := dynamicFeeTx(0, 100, big.NewInt(1), veryBigNumber, key) + if err := pool.addRemote(tx); err != core.ErrTipVeryHigh { + t.Error("expected", core.ErrTipVeryHigh, "got", err) + } + + tx2 := 
dynamicFeeTx(0, 100, veryBigNumber, big.NewInt(1), key) + if err := pool.addRemote(tx2); err != core.ErrFeeCapVeryHigh { + t.Error("expected", core.ErrFeeCapVeryHigh, "got", err) + } +} + +func TestChainForkNoAuctioneer(t *testing.T) { + t.Parallel() + + pool, key := setupPool(false) + defer pool.Close() + + addr := crypto.PubkeyToAddress(key.PublicKey) + resetState := func() { + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + statedb.AddBalance(addr, uint256.NewInt(100000000000000), tracing.BalanceChangeUnspecified) + + pool.chain = newTestBlockChain(pool.chainconfig, 1000000, statedb, new(event.Feed)) + <-pool.requestReset(nil, nil) + } + resetState() + + tx := transaction(0, 100000, key) + if _, err := pool.add(tx, false); err != nil { + t.Error("didn't expect error", err) + } + pool.removeTx(tx.Hash(), true, true) + + // reset the pool's internal state + resetState() + if _, err := pool.add(tx, false); err != nil { + t.Error("didn't expect error", err) + } +} + +func TestRemoveTxSanityNoAuctioneer(t *testing.T) { + t.Parallel() + + pool, key := setupPool(false) + defer pool.Close() + + addr := crypto.PubkeyToAddress(key.PublicKey) + resetState := func() { + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + statedb.AddBalance(addr, uint256.NewInt(100000000000000), tracing.BalanceChangeUnspecified) + + pool.chain = newTestBlockChain(pool.chainconfig, 1000000, statedb, new(event.Feed)) + <-pool.requestReset(nil, nil) + } + resetState() + + tx1 := transaction(0, 100000, key) + tx2 := transaction(1, 100000, key) + tx3 := transaction(2, 100000, key) + + if err := pool.addLocal(tx1); err != nil { + t.Error("didn't expect error", err) + } + if err := pool.addLocal(tx2); err != nil { + t.Error("didn't expect error", err) + } + if err := pool.addLocal(tx3); err != nil { + t.Error("didn't expect error", err) + } + + pendingTxs := pool.pending[addr] + if pendingTxs.Len() != 3 
{ + t.Error("expected 3 pending transactions, got", pendingTxs.Len()) + } + + if err := validatePoolInternals(pool); err != nil { + t.Errorf("pool internals validation failed: %v", err) + } + + n := pool.removeTx(tx1.Hash(), false, true) + if n != 3 { + t.Error("expected 3 transactions to be removed, got", n) + } + n = pool.removeTx(tx2.Hash(), false, true) + if n != 0 { + t.Error("expected 0 transactions to be removed, got", n) + } + n = pool.removeTx(tx3.Hash(), false, true) + if n != 0 { + t.Error("expected 0 transactions to be removed, got", n) + } + + if len(pool.pending) != 0 { + t.Error("expected 0 pending transactions, got", pendingTxs.Len()) + } + + if err := validatePoolInternals(pool); err != nil { + t.Errorf("pool internals validation failed: %v", err) + } +} + +func TestDoubleNonceNoAuctioneer(t *testing.T) { + t.Parallel() + + pool, key := setupPool(false) + defer pool.Close() + + addr := crypto.PubkeyToAddress(key.PublicKey) + resetState := func() { + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + statedb.AddBalance(addr, uint256.NewInt(100000000000000), tracing.BalanceChangeUnspecified) + + pool.chain = newTestBlockChain(pool.chainconfig, 1000000, statedb, new(event.Feed)) + <-pool.requestReset(nil, nil) + } + resetState() + + signer := types.HomesteadSigner{} + tx1, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(100), 100000, big.NewInt(1), nil), signer, key) + tx2, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(100), 1000000, big.NewInt(2), nil), signer, key) + tx3, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(100), 1000000, big.NewInt(1), nil), signer, key) + + // Add the first two transaction, ensure higher priced stays only + if replace, err := pool.add(tx1, false); err != nil || replace { + t.Errorf("first transaction insert failed (%v) or reported replacement (%v)", err, replace) + } + if replace, err := pool.add(tx2, 
false); err != nil || !replace { + t.Errorf("second transaction insert failed (%v) or not reported replacement (%v)", err, replace) + } + <-pool.requestPromoteExecutables(newAccountSet(signer, addr)) + if pool.pending[addr].Len() != 1 { + t.Error("expected 1 pending transactions, got", pool.pending[addr].Len()) + } + if tx := pool.pending[addr].txs.items[0]; tx.Hash() != tx2.Hash() { + t.Errorf("transaction mismatch: have %x, want %x", tx.Hash(), tx2.Hash()) + } + + // Add the third transaction and ensure it's not saved (smaller price) + pool.add(tx3, false) + <-pool.requestPromoteExecutables(newAccountSet(signer, addr)) + if pool.pending[addr].Len() != 1 { + t.Error("expected 1 pending transactions, got", pool.pending[addr].Len()) + } + if tx := pool.pending[addr].txs.items[0]; tx.Hash() != tx2.Hash() { + t.Errorf("transaction mismatch: have %x, want %x", tx.Hash(), tx2.Hash()) + } + // Ensure the total transaction count is correct + if pool.all.Count() != 1 { + t.Error("expected 1 total transactions, got", pool.all.Count()) + } +} + +func TestMissingNonceNoAuctioneer(t *testing.T) { + t.Parallel() + + pool, key := setupPool(false) + defer pool.Close() + + addr := crypto.PubkeyToAddress(key.PublicKey) + testAddBalance(pool, addr, big.NewInt(100000000000000)) + tx := transaction(1, 100000, key) + if _, err := pool.add(tx, false); err != nil { + t.Error("didn't expect error", err) + } + if len(pool.pending) != 0 { + t.Error("expected 0 pending transactions, got", len(pool.pending)) + } + if pool.queue[addr].Len() != 1 { + t.Error("expected 1 queued transaction, got", pool.queue[addr].Len()) + } + if pool.all.Count() != 1 { + t.Error("expected 1 total transactions, got", pool.all.Count()) + } +} + +func TestNonceRecoveryNoAuctioneer(t *testing.T) { + t.Parallel() + + const n = 10 + pool, key := setupPool(false) + defer pool.Close() + + addr := crypto.PubkeyToAddress(key.PublicKey) + testSetNonce(pool, addr, n) + testAddBalance(pool, addr, big.NewInt(100000000000000)) 
+ <-pool.requestReset(nil, nil) + + tx := transaction(n, 100000, key) + if err := pool.addRemote(tx); err != nil { + t.Error(err) + } + // simulate some weird re-order of transactions and missing nonce(s) + testSetNonce(pool, addr, n-1) + <-pool.requestReset(nil, nil) + if fn := pool.Nonce(addr); fn != n-1 { + t.Errorf("expected nonce to be %d, got %d", n-1, fn) + } +} + +// Tests that if an account runs out of funds, any pending and queued transactions +// are dropped. +func TestDroppingNoAuctioneer(t *testing.T) { + t.Parallel() + + // Create a test account and fund it + pool, key := setupPool(false) + defer pool.Close() + + account := crypto.PubkeyToAddress(key.PublicKey) + testAddBalance(pool, account, big.NewInt(1000)) + + // Add some pending and some queued transactions + var ( + tx0 = transaction(0, 100, key) + tx1 = transaction(1, 200, key) + tx2 = transaction(2, 300, key) + tx10 = transaction(10, 100, key) + tx11 = transaction(11, 200, key) + tx12 = transaction(12, 300, key) + ) + pool.all.Add(tx0, false) + pool.priced.Put(tx0, false) + pool.promoteTx(account, tx0.Hash(), tx0) + + pool.all.Add(tx1, false) + pool.priced.Put(tx1, false) + pool.promoteTx(account, tx1.Hash(), tx1) + + pool.all.Add(tx2, false) + pool.priced.Put(tx2, false) + pool.promoteTx(account, tx2.Hash(), tx2) + + pool.enqueueTx(tx10.Hash(), tx10, false, true) + pool.enqueueTx(tx11.Hash(), tx11, false, true) + pool.enqueueTx(tx12.Hash(), tx12, false, true) + + // Check that pre and post validations leave the pool as is + if pool.pending[account].Len() != 3 { + t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 3) + } + if pool.queue[account].Len() != 3 { + t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 3) + } + if pool.all.Count() != 6 { + t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 6) + } + <-pool.requestReset(nil, nil) + if pool.pending[account].Len() != 3 { + t.Errorf("pending 
transaction mismatch: have %d, want %d", pool.pending[account].Len(), 3) + } + if pool.queue[account].Len() != 3 { + t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 3) + } + if pool.all.Count() != 6 { + t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 6) + } + // Reduce the balance of the account, and check that invalidated transactions are dropped + testAddBalance(pool, account, big.NewInt(-650)) + <-pool.requestReset(nil, nil) + + if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok { + t.Errorf("funded pending transaction missing: %v", tx0) + } + if _, ok := pool.pending[account].txs.items[tx1.Nonce()]; !ok { + t.Errorf("funded pending transaction missing: %v", tx0) + } + if _, ok := pool.pending[account].txs.items[tx2.Nonce()]; ok { + t.Errorf("out-of-fund pending transaction present: %v", tx1) + } + if _, ok := pool.queue[account].txs.items[tx10.Nonce()]; !ok { + t.Errorf("funded queued transaction missing: %v", tx10) + } + if _, ok := pool.queue[account].txs.items[tx11.Nonce()]; !ok { + t.Errorf("funded queued transaction missing: %v", tx10) + } + if _, ok := pool.queue[account].txs.items[tx12.Nonce()]; ok { + t.Errorf("out-of-fund queued transaction present: %v", tx11) + } + if pool.all.Count() != 4 { + t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 4) + } + // Reduce the block gas limit, check that invalidated transactions are dropped + pool.chain.(*testBlockChain).gasLimit.Store(100) + <-pool.requestReset(nil, nil) + + if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok { + t.Errorf("funded pending transaction missing: %v", tx0) + } + if _, ok := pool.pending[account].txs.items[tx1.Nonce()]; ok { + t.Errorf("over-gased pending transaction present: %v", tx1) + } + if _, ok := pool.queue[account].txs.items[tx10.Nonce()]; !ok { + t.Errorf("funded queued transaction missing: %v", tx10) + } + if _, ok := pool.queue[account].txs.items[tx11.Nonce()]; ok { 
		t.Errorf("over-gased queued transaction present: %v", tx11)
	}
	if pool.all.Count() != 2 {
		t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 2)
	}
}

// Tests that if a transaction is dropped from the current pending pool (e.g. out
// of fund), all consecutive (still valid, but not executable) transactions are
// postponed back into the future queue to prevent broadcasting them.
func TestPostponingNoAuctioneer(t *testing.T) {
	t.Parallel()

	// Create the pool to test the postponing with. The trailing 'false' builds
	// the pool with the auctioneer mode disabled (LegacyPool.auctioneerEnabled).
	statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
	blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))

	pool := New(testTxPoolConfig, blockchain, false)
	pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
	defer pool.Close()

	// Create two test accounts to produce different gap profiles with
	keys := make([]*ecdsa.PrivateKey, 2)
	accs := make([]common.Address, len(keys))

	for i := 0; i < len(keys); i++ {
		keys[i], _ = crypto.GenerateKey()
		accs[i] = crypto.PubkeyToAddress(keys[i].PublicKey)

		testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(50100))
	}
	// Add a batch of consecutive pending transactions for validation; gas costs
	// alternate (25000/50000) so that a later 1-wei balance cut invalidates the
	// two accounts at different nonces.
	txs := []*types.Transaction{}
	for i, key := range keys {
		for j := 0; j < 100; j++ {
			var tx *types.Transaction
			if (i+j)%2 == 0 {
				tx = transaction(uint64(j), 25000, key)
			} else {
				tx = transaction(uint64(j), 50000, key)
			}
			txs = append(txs, tx)
		}
	}
	for i, err := range pool.addRemotesSync(txs) {
		if err != nil {
			t.Fatalf("tx %d: failed to add transactions: %v", i, err)
		}
	}
	// Check that pre and post validations leave the pool as is
	if pending := pool.pending[accs[0]].Len() + pool.pending[accs[1]].Len(); pending != len(txs) {
		t.Errorf("pending transaction mismatch: have %d, want %d", pending, len(txs))
	}
	if len(pool.queue) != 0 {
		t.Errorf("queued accounts mismatch: have %d, want %d", len(pool.queue), 0)
	}
	if pool.all.Count() != len(txs) {
		t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs))
	}
	<-pool.requestReset(nil, nil)
	if pending := pool.pending[accs[0]].Len() + pool.pending[accs[1]].Len(); pending != len(txs) {
		t.Errorf("pending transaction mismatch: have %d, want %d", pending, len(txs))
	}
	if len(pool.queue) != 0 {
		t.Errorf("queued accounts mismatch: have %d, want %d", len(pool.queue), 0)
	}
	if pool.all.Count() != len(txs) {
		t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs))
	}
	// Reduce the balance of the account, and check that transactions are reorganised
	for _, addr := range accs {
		testAddBalance(pool, addr, big.NewInt(-1))
	}
	<-pool.requestReset(nil, nil)

	// The first account's first transaction remains valid, check that subsequent
	// ones are either filtered out, or queued up for later.
	if _, ok := pool.pending[accs[0]].txs.items[txs[0].Nonce()]; !ok {
		t.Errorf("tx %d: valid and funded transaction missing from pending pool: %v", 0, txs[0])
	}
	if _, ok := pool.queue[accs[0]].txs.items[txs[0].Nonce()]; ok {
		t.Errorf("tx %d: valid and funded transaction present in future queue: %v", 0, txs[0])
	}
	for i, tx := range txs[1:100] {
		if i%2 == 1 {
			if _, ok := pool.pending[accs[0]].txs.items[tx.Nonce()]; ok {
				t.Errorf("tx %d: valid but future transaction present in pending pool: %v", i+1, tx)
			}
			if _, ok := pool.queue[accs[0]].txs.items[tx.Nonce()]; !ok {
				t.Errorf("tx %d: valid but future transaction missing from future queue: %v", i+1, tx)
			}
		} else {
			if _, ok := pool.pending[accs[0]].txs.items[tx.Nonce()]; ok {
				t.Errorf("tx %d: out-of-fund transaction present in pending pool: %v", i+1, tx)
			}
			if _, ok := pool.queue[accs[0]].txs.items[tx.Nonce()]; ok {
				t.Errorf("tx %d: out-of-fund transaction present in future queue: %v", i+1, tx)
			}
		}
	}
	// The second account's first transaction got invalid, check that all transactions
	// are either filtered out, or queued up for later.
	if pool.pending[accs[1]] != nil {
		t.Errorf("invalidated account still has pending transactions")
	}
	for i, tx := range txs[100:] {
		if i%2 == 1 {
			if _, ok := pool.queue[accs[1]].txs.items[tx.Nonce()]; !ok {
				t.Errorf("tx %d: valid but future transaction missing from future queue: %v", 100+i, tx)
			}
		} else {
			if _, ok := pool.queue[accs[1]].txs.items[tx.Nonce()]; ok {
				t.Errorf("tx %d: out-of-fund transaction present in future queue: %v", 100+i, tx)
			}
		}
	}
	if pool.all.Count() != len(txs)/2 {
		t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs)/2)
	}
}

// Tests that if the transaction pool has both executable and non-executable
// transactions from an origin account, filling the nonce gap moves all queued
// ones into the pending pool.
func TestGapFillingNoAuctioneer(t *testing.T) {
	t.Parallel()

	// Create a test account and fund it (auctioneer disabled)
	pool, key := setupPool(false)
	defer pool.Close()

	account := crypto.PubkeyToAddress(key.PublicKey)
	testAddBalance(pool, account, big.NewInt(1000000))

	// Keep track of transaction events to ensure all executables get announced
	events := make(chan core.NewTxsEvent, testTxPoolConfig.AccountQueue+5)
	sub := pool.txFeed.Subscribe(events)
	defer sub.Unsubscribe()

	// Create a pending and a queued transaction with a nonce-gap in between
	pool.addRemotesSync([]*types.Transaction{
		transaction(0, 100000, key),
		transaction(2, 100000, key),
	})
	pending, queued := pool.Stats()
	if pending != 1 {
		t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 1)
	}
	if queued != 1 {
		t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1)
	}
	if err := validateEvents(events, 1); err != nil {
		t.Fatalf("original event firing failed: %v", err)
	}
	if err := validatePoolInternals(pool); err != nil {
		t.Fatalf("pool internal state corrupted: %v", err)
	}
	// Fill the nonce gap and ensure all transactions become pending
	if err := pool.addRemoteSync(transaction(1, 100000, key)); err != nil {
		t.Fatalf("failed to add gapped transaction: %v", err)
	}
	pending, queued = pool.Stats()
	if pending != 3 {
		t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3)
	}
	if queued != 0 {
		t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
	}
	if err := validateEvents(events, 2); err != nil {
		t.Fatalf("gap-filling event firing failed: %v", err)
	}
	if err := validatePoolInternals(pool); err != nil {
		t.Fatalf("pool internal state corrupted: %v", err)
	}
}

// Tests that if the transaction count belonging to a single account goes above
// some threshold, the higher transactions are dropped to prevent DOS attacks.
func TestQueueAccountLimitingNoAuctioneer(t *testing.T) {
	t.Parallel()

	// Create a test account and fund it (auctioneer disabled)
	pool, key := setupPool(false)
	defer pool.Close()

	account := crypto.PubkeyToAddress(key.PublicKey)
	testAddBalance(pool, account, big.NewInt(1000000))

	// Keep queuing up transactions and make sure all above a limit are dropped.
	// Nonces start at 1 so every transaction is gapped and lands in the queue.
	for i := uint64(1); i <= testTxPoolConfig.AccountQueue+5; i++ {
		if err := pool.addRemoteSync(transaction(i, 100000, key)); err != nil {
			t.Fatalf("tx %d: failed to add transaction: %v", i, err)
		}
		if len(pool.pending) != 0 {
			t.Errorf("tx %d: pending pool size mismatch: have %d, want %d", i, len(pool.pending), 0)
		}
		if i <= testTxPoolConfig.AccountQueue {
			if pool.queue[account].Len() != int(i) {
				t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, pool.queue[account].Len(), i)
			}
		} else {
			if pool.queue[account].Len() != int(testTxPoolConfig.AccountQueue) {
				t.Errorf("tx %d: queue limit mismatch: have %d, want %d", i, pool.queue[account].Len(), testTxPoolConfig.AccountQueue)
			}
		}
	}
	if pool.all.Count() != int(testTxPoolConfig.AccountQueue) {
		t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), testTxPoolConfig.AccountQueue)
	}
}

// Tests that if the transaction count belonging to multiple accounts go above
// some threshold, the higher transactions are dropped to prevent DOS attacks.
//
// This logic should not hold for local transactions, unless the local tracking
// mechanism is disabled.
func TestQueueGlobalLimitingNoAuctioneer(t *testing.T) {
	testQueueGlobalLimiting(t, false)
}
func TestQueueGlobalLimitingNoLocalsNoAuctioneer(t *testing.T) {
	testQueueGlobalLimiting(t, true)
}

func testQueueGlobalLimiting(t *testing.T, nolocals bool) {
	t.Parallel()

	// Create the pool to test the limit enforcement with (auctioneer disabled)
	statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
	blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))

	config := testTxPoolConfig
	config.NoLocals = nolocals
	config.GlobalQueue = config.AccountQueue*3 - 1 // reduce the queue limits to shorten test time (-1 to make it non divisible)

	pool := New(config, blockchain, false)
	pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
	defer pool.Close()

	// Create a number of test accounts and fund them (last one will be the local)
	keys := make([]*ecdsa.PrivateKey, 5)
	for i := 0; i < len(keys); i++ {
		keys[i], _ = crypto.GenerateKey()
		testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000))
	}
	local := keys[len(keys)-1]

	// Generate and queue a batch of transactions (nonce+1 keeps them gapped, so
	// they stay in the future queue rather than becoming pending)
	nonces := make(map[common.Address]uint64)

	txs := make(types.Transactions, 0, 3*config.GlobalQueue)
	for len(txs) < cap(txs) {
		key := keys[rand.Intn(len(keys)-1)] // skip adding transactions with the local account
		addr := crypto.PubkeyToAddress(key.PublicKey)

		txs = append(txs, transaction(nonces[addr]+1, 100000, key))
		nonces[addr]++
	}
	// Import the batch and verify that limits have been enforced
	pool.addRemotesSync(txs)

	queued := 0
	for addr, list := range pool.queue {
		if list.Len() > int(config.AccountQueue) {
			t.Errorf("addr %x: queued accounts overflown allowance: %d > %d", addr, list.Len(), config.AccountQueue)
		}
		queued += list.Len()
	}
	if queued > int(config.GlobalQueue) {
		t.Fatalf("total transactions overflow allowance: %d > %d", queued, config.GlobalQueue)
	}
	// Generate a batch of transactions from the local account and import them
	txs = txs[:0]
	for i := uint64(0); i < 3*config.GlobalQueue; i++ {
		txs = append(txs, transaction(i+1, 100000, local))
	}
	pool.addLocals(txs)

	// If locals are disabled, the previous eviction algorithm should apply here too
	if nolocals {
		queued := 0
		for addr, list := range pool.queue {
			if list.Len() > int(config.AccountQueue) {
				t.Errorf("addr %x: queued accounts overflown allowance: %d > %d", addr, list.Len(), config.AccountQueue)
			}
			queued += list.Len()
		}
		if queued > int(config.GlobalQueue) {
			t.Fatalf("total transactions overflow allowance: %d > %d", queued, config.GlobalQueue)
		}
	} else {
		// Local exemptions are enabled, make sure the local account owned the queue
		if len(pool.queue) != 1 {
			t.Errorf("multiple accounts in queue: have %v, want %v", len(pool.queue), 1)
		}
		// Also ensure no local transactions are ever dropped, even if above global limits
		if queued := pool.queue[crypto.PubkeyToAddress(local.PublicKey)].Len(); uint64(queued) != 3*config.GlobalQueue {
			t.Fatalf("local account queued transaction count mismatch: have %v, want %v", queued, 3*config.GlobalQueue)
		}
	}
}

// Tests that if an account remains idle for a prolonged amount of time, any
// non-executable transactions queued up are dropped to prevent wasting resources
// on shuffling them around.
+// +// This logic should not hold for local transactions, unless the local tracking +// mechanism is disabled. +func TestQueueTimeLimitingNoAuctioneer(t *testing.T) { + testQueueTimeLimitingNoAuctioneer(t, false) +} +func TestQueueTimeLimitingNoLocalsNoAuctioneer(t *testing.T) { + testQueueTimeLimitingNoAuctioneer(t, true) +} + +func testQueueTimeLimitingNoAuctioneer(t *testing.T, nolocals bool) { + // Reduce the eviction interval to a testable amount + defer func(old time.Duration) { evictionInterval = old }(evictionInterval) + evictionInterval = time.Millisecond * 100 + + // Create the pool to test the non-expiration enforcement + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + + config := testTxPoolConfig + config.Lifetime = time.Second + config.NoLocals = nolocals + + pool := New(config, blockchain, false) + pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() + + // Create two test accounts to ensure remotes expire but locals do not + local, _ := crypto.GenerateKey() + remote, _ := crypto.GenerateKey() + + testAddBalance(pool, crypto.PubkeyToAddress(local.PublicKey), big.NewInt(1000000000)) + testAddBalance(pool, crypto.PubkeyToAddress(remote.PublicKey), big.NewInt(1000000000)) + + // Add the two transactions and ensure they both are queued up + if err := pool.addLocal(pricedTransaction(1, 100000, big.NewInt(1), local)); err != nil { + t.Fatalf("failed to add local transaction: %v", err) + } + if err := pool.addRemote(pricedTransaction(1, 100000, big.NewInt(1), remote)); err != nil { + t.Fatalf("failed to add remote transaction: %v", err) + } + pending, queued := pool.Stats() + if pending != 0 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0) + } + if queued != 2 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) + } + 
if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + + // Allow the eviction interval to run + time.Sleep(2 * evictionInterval) + + // Transactions should not be evicted from the queue yet since lifetime duration has not passed + pending, queued = pool.Stats() + if pending != 0 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0) + } + if queued != 2 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + + // Wait a bit for eviction to run and clean up any leftovers, and ensure only the local remains + time.Sleep(2 * config.Lifetime) + + pending, queued = pool.Stats() + if pending != 0 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0) + } + if nolocals { + if queued != 0 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) + } + } else { + if queued != 1 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) + } + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + + // remove current transactions and increase nonce to prepare for a reset and cleanup + statedb.SetNonce(crypto.PubkeyToAddress(remote.PublicKey), 2) + statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 2) + <-pool.requestReset(nil, nil) + + // make sure queue, pending are cleared + pending, queued = pool.Stats() + if pending != 0 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0) + } + if queued != 0 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + + // Queue gapped transactions + if err := pool.addLocal(pricedTransaction(4, 100000, big.NewInt(1), local)); err != nil { + 
t.Fatalf("failed to add remote transaction: %v", err) + } + if err := pool.addRemoteSync(pricedTransaction(4, 100000, big.NewInt(1), remote)); err != nil { + t.Fatalf("failed to add remote transaction: %v", err) + } + time.Sleep(5 * evictionInterval) // A half lifetime pass + + // Queue executable transactions, the life cycle should be restarted. + if err := pool.addLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil { + t.Fatalf("failed to add remote transaction: %v", err) + } + if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(1), remote)); err != nil { + t.Fatalf("failed to add remote transaction: %v", err) + } + time.Sleep(6 * evictionInterval) + + // All gapped transactions shouldn't be kicked out + pending, queued = pool.Stats() + if pending != 2 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) + } + if queued != 2 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + + // The whole life time pass after last promotion, kick out stale transactions + time.Sleep(2 * config.Lifetime) + pending, queued = pool.Stats() + if pending != 2 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) + } + if nolocals { + if queued != 0 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) + } + } else { + if queued != 1 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) + } + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } +} + +// Tests that even if the transaction count belonging to a single account goes +// above some threshold, as long as the transactions are executable, they are +// accepted. 
func TestPendingLimitingNoAuctioneer(t *testing.T) {
	t.Parallel()

	// Create a test account and fund it (auctioneer disabled)
	pool, key := setupPool(false)
	defer pool.Close()

	account := crypto.PubkeyToAddress(key.PublicKey)
	testAddBalance(pool, account, big.NewInt(1000000000000))

	// Keep track of transaction events to ensure all executables get announced
	events := make(chan core.NewTxsEvent, testTxPoolConfig.AccountQueue+5)
	sub := pool.txFeed.Subscribe(events)
	defer sub.Unsubscribe()

	// Keep queuing up transactions and make sure all above a limit are dropped.
	// Nonces are consecutive from 0, so every transaction is executable and
	// should be accepted into pending (no queue limit applies).
	for i := uint64(0); i < testTxPoolConfig.AccountQueue+5; i++ {
		if err := pool.addRemoteSync(transaction(i, 100000, key)); err != nil {
			t.Fatalf("tx %d: failed to add transaction: %v", i, err)
		}
		if pool.pending[account].Len() != int(i)+1 {
			t.Errorf("tx %d: pending pool size mismatch: have %d, want %d", i, pool.pending[account].Len(), i+1)
		}
		if len(pool.queue) != 0 {
			t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, pool.queue[account].Len(), 0)
		}
	}
	if pool.all.Count() != int(testTxPoolConfig.AccountQueue+5) {
		t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), testTxPoolConfig.AccountQueue+5)
	}
	if err := validateEvents(events, int(testTxPoolConfig.AccountQueue+5)); err != nil {
		t.Fatalf("event firing failed: %v", err)
	}
	if err := validatePoolInternals(pool); err != nil {
		t.Fatalf("pool internal state corrupted: %v", err)
	}
}

// Tests that if the transaction count belonging to multiple accounts go above
// some hard threshold, the higher transactions are dropped to prevent DOS
// attacks.
func TestPendingGlobalLimitingNoAuctioneer(t *testing.T) {
	t.Parallel()

	// Create the pool to test the limit enforcement with (auctioneer disabled)
	statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
	blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))

	config := testTxPoolConfig
	config.GlobalSlots = config.AccountSlots * 10

	pool := New(config, blockchain, false)
	pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
	defer pool.Close()

	// Create a number of test accounts and fund them
	keys := make([]*ecdsa.PrivateKey, 5)
	for i := 0; i < len(keys); i++ {
		keys[i], _ = crypto.GenerateKey()
		testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000))
	}
	// Generate and queue a batch of transactions: twice the global slot count,
	// spread evenly across the accounts, all executable (consecutive nonces)
	nonces := make(map[common.Address]uint64)

	txs := types.Transactions{}
	for _, key := range keys {
		addr := crypto.PubkeyToAddress(key.PublicKey)
		for j := 0; j < int(config.GlobalSlots)/len(keys)*2; j++ {
			txs = append(txs, transaction(nonces[addr], 100000, key))
			nonces[addr]++
		}
	}
	// Import the batch and verify that limits have been enforced
	pool.addRemotesSync(txs)

	pending := 0
	for _, list := range pool.pending {
		pending += list.Len()
	}
	if pending > int(config.GlobalSlots) {
		t.Fatalf("total pending transactions overflow allowance: %d > %d", pending, config.GlobalSlots)
	}
	if err := validatePoolInternals(pool); err != nil {
		t.Fatalf("pool internal state corrupted: %v", err)
	}
}

// Test the limit on transaction size is enforced correctly.
// This test verifies every transaction having allowed size
// is added to the pool, and longer transactions are rejected.
func TestAllowedTxSizeNoAuctioneer(t *testing.T) {
	t.Parallel()

	// Create a test account and fund it (auctioneer disabled)
	pool, key := setupPool(false)
	defer pool.Close()

	account := crypto.PubkeyToAddress(key.PublicKey)
	testAddBalance(pool, account, big.NewInt(1000000000))

	// Compute maximal data size for transactions (lower bound).
	//
	// It is assumed the fields in the transaction (except of the data) are:
	//   - nonce     <= 32 bytes
	//   - gasTip    <= 32 bytes
	//   - gasLimit  <= 32 bytes
	//   - recipient == 20 bytes
	//   - value     <= 32 bytes
	//   - signature == 65 bytes
	// All those fields are summed up to at most 213 bytes.
	baseSize := uint64(213)
	dataSize := txMaxSize - baseSize
	// Try adding a transaction with maximal allowed size
	tx := pricedDataTransaction(0, pool.currentHead.Load().GasLimit, big.NewInt(1), key, dataSize)
	if err := pool.addRemoteSync(tx); err != nil {
		t.Fatalf("failed to add transaction of size %d, close to maximal: %v", int(tx.Size()), err)
	}
	// Try adding a transaction with random allowed size
	if err := pool.addRemoteSync(pricedDataTransaction(1, pool.currentHead.Load().GasLimit, big.NewInt(1), key, uint64(rand.Intn(int(dataSize))))); err != nil {
		t.Fatalf("failed to add transaction of random allowed size: %v", err)
	}
	// Try adding a transaction of minimal not allowed size (data alone already
	// fills txMaxSize, so the envelope must exceed the limit)
	if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentHead.Load().GasLimit, big.NewInt(1), key, txMaxSize)); err == nil {
		t.Fatalf("expected rejection on slightly oversize transaction")
	}
	// Try adding a transaction of random not allowed size
	if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentHead.Load().GasLimit, big.NewInt(1), key, dataSize+1+uint64(rand.Intn(10*txMaxSize)))); err == nil {
		t.Fatalf("expected rejection on oversize transaction")
	}
	// Run some sanity checks on the pool internals
	pending, queued := pool.Stats()
	if pending != 2 {
		t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
	}
	if queued != 0 {
		t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0)
	}
	if err := validatePoolInternals(pool); err != nil {
		t.Fatalf("pool internal state corrupted: %v", err)
	}
}

// Tests that if transactions start being capped, transactions are also removed from 'all'
func TestCapClearsFromAllNoAuctioneer(t *testing.T) {
	t.Parallel()

	// Create the pool to test the limit enforcement with (auctioneer disabled)
	statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
	blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))

	config := testTxPoolConfig
	config.AccountSlots = 2
	config.AccountQueue = 2
	config.GlobalSlots = 8

	pool := New(config, blockchain, false)
	pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
	defer pool.Close()

	// Create a number of test accounts and fund them
	key, _ := crypto.GenerateKey()
	addr := crypto.PubkeyToAddress(key.PublicKey)
	testAddBalance(pool, addr, big.NewInt(1000000))

	txs := types.Transactions{}
	for j := 0; j < int(config.GlobalSlots)*2; j++ {
		txs = append(txs, transaction(uint64(j), 100000, key))
	}
	// Import the batch and verify that limits have been enforced
	pool.addRemotes(txs)
	if err := validatePoolInternals(pool); err != nil {
		t.Fatalf("pool internal state corrupted: %v", err)
	}
}

// Tests that if the transaction count belonging to multiple accounts go above
// some hard threshold, if they are under the minimum guaranteed slot count then
// the transactions are still kept.
func TestPendingMinimumAllowanceNoAuctioneer(t *testing.T) {
	t.Parallel()

	// Create the pool to test the limit enforcement with (auctioneer disabled).
	// GlobalSlots is set below AccountSlots so the per-account minimum is what
	// keeps transactions alive.
	statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
	blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))

	config := testTxPoolConfig
	config.GlobalSlots = 1

	pool := New(config, blockchain, false)
	pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
	defer pool.Close()

	// Create a number of test accounts and fund them
	keys := make([]*ecdsa.PrivateKey, 5)
	for i := 0; i < len(keys); i++ {
		keys[i], _ = crypto.GenerateKey()
		testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000))
	}
	// Generate and queue a batch of transactions
	nonces := make(map[common.Address]uint64)

	txs := types.Transactions{}
	for _, key := range keys {
		addr := crypto.PubkeyToAddress(key.PublicKey)
		for j := 0; j < int(config.AccountSlots)*2; j++ {
			txs = append(txs, transaction(nonces[addr], 100000, key))
			nonces[addr]++
		}
	}
	// Import the batch and verify that limits have been enforced
	pool.addRemotesSync(txs)

	for addr, list := range pool.pending {
		if list.Len() != int(config.AccountSlots) {
			t.Errorf("addr %x: total pending transactions mismatch: have %d, want %d", addr, list.Len(), config.AccountSlots)
		}
	}
	if err := validatePoolInternals(pool); err != nil {
		t.Fatalf("pool internal state corrupted: %v", err)
	}
}

// Tests that setting the transaction pool gas price to a higher value correctly
// discards everything cheaper than that and moves any gapped transactions back
// from the pending pool to the queue.
//
// Note, local transactions are never allowed to be dropped.
func TestRepricingNoAuctioneer(t *testing.T) {
	t.Parallel()

	// Create the pool to test the pricing enforcement with (auctioneer disabled)
	statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
	blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed))

	pool := New(testTxPoolConfig, blockchain, false)
	pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
	defer pool.Close()

	// Keep track of transaction events to ensure all executables get announced
	events := make(chan core.NewTxsEvent, 32)
	sub := pool.txFeed.Subscribe(events)
	defer sub.Unsubscribe()

	// Create a number of test accounts and fund them
	keys := make([]*ecdsa.PrivateKey, 4)
	for i := 0; i < len(keys); i++ {
		keys[i], _ = crypto.GenerateKey()
		testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000))
	}
	// Generate and queue a batch of transactions, both pending and queued:
	// keys[0]/keys[1] are executable from nonce 0, keys[2] starts at nonce 1
	// (queued), keys[3] is the local account.
	txs := types.Transactions{}

	txs = append(txs, pricedTransaction(0, 100000, big.NewInt(2), keys[0]))
	txs = append(txs, pricedTransaction(1, 100000, big.NewInt(1), keys[0]))
	txs = append(txs, pricedTransaction(2, 100000, big.NewInt(2), keys[0]))

	txs = append(txs, pricedTransaction(0, 100000, big.NewInt(1), keys[1]))
	txs = append(txs, pricedTransaction(1, 100000, big.NewInt(2), keys[1]))
	txs = append(txs, pricedTransaction(2, 100000, big.NewInt(2), keys[1]))

	txs = append(txs, pricedTransaction(1, 100000, big.NewInt(2), keys[2]))
	txs = append(txs, pricedTransaction(2, 100000, big.NewInt(1), keys[2]))
	txs = append(txs, pricedTransaction(3, 100000, big.NewInt(2), keys[2]))

	ltx := pricedTransaction(0, 100000, big.NewInt(1), keys[3])

	// Import the batch and that both pending and queued transactions match up
	pool.addRemotesSync(txs)
	pool.addLocal(ltx)

	pending, queued := pool.Stats()
	if pending != 7 {
		t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 7)
	}
	if queued != 3 {
		t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 3)
	}
	if err := validateEvents(events, 7); err != nil {
		t.Fatalf("original event firing failed: %v", err)
	}
	if err := validatePoolInternals(pool); err != nil {
		t.Fatalf("pool internal state corrupted: %v", err)
	}
	// Reprice the pool and check that underpriced transactions get dropped
	pool.SetGasTip(big.NewInt(2))

	pending, queued = pool.Stats()
	if pending != 2 {
		t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
	}
	if queued != 5 {
		t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 5)
	}
	if err := validateEvents(events, 0); err != nil {
		t.Fatalf("reprice event firing failed: %v", err)
	}
	if err := validatePoolInternals(pool); err != nil {
		t.Fatalf("pool internal state corrupted: %v", err)
	}
	// Check that we can't add the old transactions back
	if err := pool.addRemote(pricedTransaction(1, 100000, big.NewInt(1), keys[0])); !errors.Is(err, txpool.ErrUnderpriced) {
		t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced)
	}
	if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); !errors.Is(err, txpool.ErrUnderpriced) {
		t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced)
	}
	if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(1), keys[2])); !errors.Is(err, txpool.ErrUnderpriced) {
		t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced)
	}
	if err := validateEvents(events, 0); err != nil {
		t.Fatalf("post-reprice event firing failed: %v", err)
	}
	if err := validatePoolInternals(pool); err != nil {
		t.Fatalf("pool internal state corrupted: %v", err)
	}
	// However we can add local underpriced transactions
	tx := pricedTransaction(1, 100000, big.NewInt(1), keys[3])
	if err := pool.addLocal(tx); err != nil {
		t.Fatalf("failed to add underpriced local transaction: %v", err)
	}
	if pending, _ = pool.Stats(); pending != 3 {
		t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3)
	}
	if err := validateEvents(events, 1); err != nil {
		t.Fatalf("post-reprice local event firing failed: %v", err)
	}
	if err := validatePoolInternals(pool); err != nil {
		t.Fatalf("pool internal state corrupted: %v", err)
	}
	// And we can fill gaps with properly priced transactions
	if err := pool.addRemote(pricedTransaction(1, 100000, big.NewInt(2), keys[0])); err != nil {
		t.Fatalf("failed to add pending transaction: %v", err)
	}
	if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(2), keys[1])); err != nil {
		t.Fatalf("failed to add pending transaction: %v", err)
	}
	if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(2), keys[2])); err != nil {
		t.Fatalf("failed to add queued transaction: %v", err)
	}
	if err := validateEvents(events, 5); err != nil {
		t.Fatalf("post-reprice event firing failed: %v", err)
	}
	if err := validatePoolInternals(pool); err != nil {
		t.Fatalf("pool internal state corrupted: %v", err)
	}
}

// Tests that the minimum gas tip configured on the pool is enforced for both
// local and remote additions, with the auctioneer disabled.
func TestMinGasPriceEnforcedNoAuctioneer(t *testing.T) {
	t.Parallel()

	// Create the pool to test the pricing enforcement with (auctioneer disabled)
	statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil)
	blockchain := newTestBlockChain(eip1559Config, 10000000, statedb, new(event.Feed))

	txPoolConfig := DefaultConfig
	txPoolConfig.NoLocals = true
	pool := New(txPoolConfig, blockchain, false)
	pool.Init(txPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver())
	defer pool.Close()

	key, _ := crypto.GenerateKey()
	testAddBalance(pool, crypto.PubkeyToAddress(key.PublicKey), big.NewInt(1000000))

	// Legacy transaction priced just below the configured tip must be rejected
	tx := pricedTransaction(0, 100000, big.NewInt(2), key)
	pool.SetGasTip(big.NewInt(tx.GasPrice().Int64() + 1))

	if err := pool.addLocal(tx); !errors.Is(err, txpool.ErrUnderpriced) {
		t.Fatalf("Min tip not enforced")
	}

	if err := pool.Add([]*types.Transaction{tx}, true, false)[0]; !errors.Is(err, txpool.ErrUnderpriced) {
		t.Fatalf("Min tip not enforced")
	}

	// Same check for a dynamic-fee transaction against its tip cap
	tx = dynamicFeeTx(0, 100000, big.NewInt(3), big.NewInt(2), key)
	pool.SetGasTip(big.NewInt(tx.GasTipCap().Int64() + 1))

	if err := pool.addLocal(tx); !errors.Is(err, txpool.ErrUnderpriced) {
		t.Fatalf("Min tip not enforced")
	}

	if err := pool.Add([]*types.Transaction{tx}, true, false)[0]; !errors.Is(err, txpool.ErrUnderpriced) {
		t.Fatalf("Min tip not enforced")
	}
	// Make sure the tx is accepted if locals are enabled
	pool.config.NoLocals = false
	if err := pool.Add([]*types.Transaction{tx}, true, false)[0]; err != nil {
		t.Fatalf("Min tip enforced with locals enabled, error: %v", err)
	}
}

// Tests that setting the transaction pool gas price to a higher value correctly
// discards everything cheaper (legacy & dynamic fee) than that and moves any
// gapped transactions back from the pending pool to the queue.
//
// Note, local transactions are never allowed to be dropped.
func TestRepricingDynamicFeeNoAuctioneer(t *testing.T) {
	t.Parallel()

	// Create the pool to test the pricing enforcement with (auctioneer disabled)
	pool, _ := setupPoolWithConfig(eip1559Config, false)
	defer pool.Close()

	// Keep track of transaction events to ensure all executables get announced
	events := make(chan core.NewTxsEvent, 32)
	sub := pool.txFeed.Subscribe(events)
	defer sub.Unsubscribe()

	// Create a number of test accounts and fund them
	keys := make([]*ecdsa.PrivateKey, 4)
	for i := 0; i < len(keys); i++ {
		keys[i], _ = crypto.GenerateKey()
		testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000))
	}
	// Generate and queue a batch of transactions, both pending and queued:
	// keys[0] uses legacy txs, keys[1]/keys[2] dynamic-fee txs (keys[2] gapped),
	// keys[3] is the local account.
	txs := types.Transactions{}

	txs = append(txs, pricedTransaction(0, 100000, big.NewInt(2), keys[0]))
	txs = append(txs, pricedTransaction(1, 100000, big.NewInt(1), keys[0]))
	txs = append(txs, pricedTransaction(2, 100000, big.NewInt(2), keys[0]))

	txs = append(txs, dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[1]))
	txs = append(txs, dynamicFeeTx(1, 100000, big.NewInt(3), big.NewInt(2), keys[1]))
	txs = append(txs, dynamicFeeTx(2, 100000, big.NewInt(3), big.NewInt(2), keys[1]))

	txs = append(txs, dynamicFeeTx(1, 100000, big.NewInt(2), big.NewInt(2), keys[2]))
	txs = append(txs, dynamicFeeTx(2, 100000, big.NewInt(1), big.NewInt(1), keys[2]))
	txs = append(txs, dynamicFeeTx(3, 100000, big.NewInt(2), big.NewInt(2), keys[2]))

	ltx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[3])

	// Import the batch and that both pending and queued transactions match up
	pool.addRemotesSync(txs)
	pool.addLocal(ltx)

	pending, queued := pool.Stats()
	if pending != 7 {
		t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 7)
	}
	if queued != 3 {
		t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 3)
	}
	if err := validateEvents(events, 7); err != nil {
		t.Fatalf("original event firing failed: %v", err)
	}
	if err := validatePoolInternals(pool); err != nil {
		t.Fatalf("pool internal state corrupted: %v", err)
	}
	// Reprice the pool and check that underpriced transactions get dropped
	pool.SetGasTip(big.NewInt(2))

	pending, queued = pool.Stats()
	if pending != 2 {
		t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
	}
	if queued != 5 {
		t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 5)
	}
	if err := validateEvents(events, 0); err != nil {
		t.Fatalf("reprice event firing failed: %v", err)
	}
	if err := validatePoolInternals(pool); err != nil {
		t.Fatalf("pool internal state corrupted: %v", err)
	}
	// Check that we can't add the old transactions back
	tx := pricedTransaction(1, 100000, big.NewInt(1), keys[0])
	if err := pool.addRemote(tx); !errors.Is(err, txpool.ErrUnderpriced) {
		t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced)
	}
	tx = dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[1])
	if err := pool.addRemote(tx); !errors.Is(err, txpool.ErrUnderpriced) {
		t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced)
	}
	tx = dynamicFeeTx(2, 100000, big.NewInt(1), big.NewInt(1), keys[2])
	if err := pool.addRemote(tx); !errors.Is(err, txpool.ErrUnderpriced) {
		t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced)
	}
	if err := validateEvents(events, 0); err != nil {
		t.Fatalf("post-reprice event firing failed: %v", err)
	}
	if err := validatePoolInternals(pool); err != nil {
		t.Fatalf("pool internal state corrupted: %v", err)
	}
	// However we can add local underpriced transactions
	tx = dynamicFeeTx(1, 100000, big.NewInt(1), big.NewInt(1), keys[3])
	if err := pool.addLocal(tx); err != nil {
		t.Fatalf("failed to add underpriced local transaction: %v", err)
	}
	if pending, _ = pool.Stats(); pending != 3 {
		t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3)
	}
	if err := validateEvents(events, 1); err != nil {
		t.Fatalf("post-reprice local event firing failed: %v", err)
	}
	if err := validatePoolInternals(pool); err != nil {
		t.Fatalf("pool internal state corrupted: %v", err)
	}
	// And we can fill gaps with properly priced transactions
	tx = pricedTransaction(1, 100000, big.NewInt(2), keys[0])
	if err := pool.addRemote(tx); err != nil {
		t.Fatalf("failed to add pending transaction: %v", err)
	}
	tx = dynamicFeeTx(0, 100000, big.NewInt(3), big.NewInt(2), keys[1])
	if err := pool.addRemote(tx); err != nil {
		t.Fatalf("failed to add pending transaction: %v", err)
	}
	tx = dynamicFeeTx(2, 100000, big.NewInt(2), big.NewInt(2), keys[2])
	if err := pool.addRemoteSync(tx); err != nil {
		t.Fatalf("failed to add queued transaction: %v", err)
	}
	if err := validateEvents(events, 5); err != nil {
		t.Fatalf("post-reprice event firing failed: %v", err)
	}
	if err := validatePoolInternals(pool); err != nil {
		t.Fatalf("pool internal state corrupted: %v", err)
	}
}

// Tests that setting the transaction pool gas price to a higher value does not
// remove local transactions (legacy & dynamic fee).
+func TestRepricingKeepsLocalsNoAuctioneer(t *testing.T) { + t.Parallel() + + // Create the pool to test the pricing enforcement with + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed)) + + pool := New(testTxPoolConfig, blockchain, false) + pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() + + // Create a number of test accounts and fund them + keys := make([]*ecdsa.PrivateKey, 3) + for i := 0; i < len(keys); i++ { + keys[i], _ = crypto.GenerateKey() + testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(100000*1000000)) + } + // Create transaction (both pending and queued) with a linearly growing gasprice + for i := uint64(0); i < 500; i++ { + // Add pending transaction. + pendingTx := pricedTransaction(i, 100000, big.NewInt(int64(i)), keys[2]) + if err := pool.addLocal(pendingTx); err != nil { + t.Fatal(err) + } + // Add queued transaction. + queuedTx := pricedTransaction(i+501, 100000, big.NewInt(int64(i)), keys[2]) + if err := pool.addLocal(queuedTx); err != nil { + t.Fatal(err) + } + + // Add pending dynamic fee transaction. + pendingTx = dynamicFeeTx(i, 100000, big.NewInt(int64(i)+1), big.NewInt(int64(i)), keys[1]) + if err := pool.addLocal(pendingTx); err != nil { + t.Fatal(err) + } + // Add queued dynamic fee transaction. 
+ queuedTx = dynamicFeeTx(i+501, 100000, big.NewInt(int64(i)+1), big.NewInt(int64(i)), keys[1]) + if err := pool.addLocal(queuedTx); err != nil { + t.Fatal(err) + } + } + pending, queued := pool.Stats() + expPending, expQueued := 1000, 1000 + validate := func() { + pending, queued = pool.Stats() + if pending != expPending { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, expPending) + } + if queued != expQueued { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, expQueued) + } + + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + } + validate() + + // Reprice the pool and check that nothing is dropped + pool.SetGasTip(big.NewInt(2)) + validate() + + pool.SetGasTip(big.NewInt(2)) + pool.SetGasTip(big.NewInt(4)) + pool.SetGasTip(big.NewInt(8)) + pool.SetGasTip(big.NewInt(100)) + validate() +} + +// Tests that when the pool reaches its global transaction limit, underpriced +// transactions are gradually shifted out for more expensive ones and any gapped +// pending transactions are moved into the queue. +// +// Note, local transactions are never allowed to be dropped. 
+func TestUnderpricingNoAuctioneer(t *testing.T) { + t.Parallel() + + // Create the pool to test the pricing enforcement with + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + + config := testTxPoolConfig + config.GlobalSlots = 2 + config.GlobalQueue = 2 + + pool := New(config, blockchain, false) + pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() + + // Keep track of transaction events to ensure all executables get announced + events := make(chan core.NewTxsEvent, 32) + sub := pool.txFeed.Subscribe(events) + defer sub.Unsubscribe() + + // Create a number of test accounts and fund them + keys := make([]*ecdsa.PrivateKey, 5) + for i := 0; i < len(keys); i++ { + keys[i], _ = crypto.GenerateKey() + testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000)) + } + // Generate and queue a batch of transactions, both pending and queued + txs := types.Transactions{} + + txs = append(txs, pricedTransaction(0, 100000, big.NewInt(1), keys[0])) + txs = append(txs, pricedTransaction(1, 100000, big.NewInt(2), keys[0])) + + txs = append(txs, pricedTransaction(1, 100000, big.NewInt(1), keys[1])) + + ltx := pricedTransaction(0, 100000, big.NewInt(1), keys[2]) + + // Import the batch and that both pending and queued transactions match up + pool.addRemotes(txs) + pool.addLocal(ltx) + + pending, queued := pool.Stats() + if pending != 3 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3) + } + if queued != 1 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) + } + if err := validateEvents(events, 3); err != nil { + t.Fatalf("original event firing failed: %v", err) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + // Ensure that adding an underpriced 
transaction on block limit fails + if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); !errors.Is(err, txpool.ErrUnderpriced) { + t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced) + } + // Replace a future transaction with a future transaction + if err := pool.addRemoteSync(pricedTransaction(1, 100000, big.NewInt(2), keys[1])); err != nil { // +K1:1 => -K1:1 => Pend K0:0, K0:1, K2:0; Que K1:1 + t.Fatalf("failed to add well priced transaction: %v", err) + } + // Ensure that adding high priced transactions drops cheap ones, but not own + if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(3), keys[1])); err != nil { // +K1:0 => -K1:1 => Pend K0:0, K0:1, K1:0, K2:0; Que - + t.Fatalf("failed to add well priced transaction: %v", err) + } + if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(4), keys[1])); err != nil { // +K1:2 => -K0:0 => Pend K1:0, K2:0; Que K0:1 K1:2 + t.Fatalf("failed to add well priced transaction: %v", err) + } + if err := pool.addRemote(pricedTransaction(3, 100000, big.NewInt(5), keys[1])); err != nil { // +K1:3 => -K0:1 => Pend K1:0, K2:0; Que K1:2 K1:3 + t.Fatalf("failed to add well priced transaction: %v", err) + } + // Ensure that replacing a pending transaction with a future transaction fails + if err := pool.addRemote(pricedTransaction(5, 100000, big.NewInt(6), keys[1])); err != txpool.ErrFutureReplacePending { + t.Fatalf("adding future replace transaction error mismatch: have %v, want %v", err, txpool.ErrFutureReplacePending) + } + pending, queued = pool.Stats() + if pending != 2 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) + } + if queued != 2 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) + } + if err := validateEvents(events, 2); err != nil { + t.Fatalf("additional event firing failed: %v", err) + } + if err := validatePoolInternals(pool); err != nil 
{ + t.Fatalf("pool internal state corrupted: %v", err) + } + // Ensure that adding local transactions can push out even higher priced ones + ltx = pricedTransaction(1, 100000, big.NewInt(0), keys[2]) + if err := pool.addLocal(ltx); err != nil { + t.Fatalf("failed to append underpriced local transaction: %v", err) + } + ltx = pricedTransaction(0, 100000, big.NewInt(0), keys[3]) + if err := pool.addLocal(ltx); err != nil { + t.Fatalf("failed to add new underpriced local transaction: %v", err) + } + pending, queued = pool.Stats() + if pending != 3 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3) + } + if queued != 1 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) + } + if err := validateEvents(events, 2); err != nil { + t.Fatalf("local event firing failed: %v", err) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } +} + +// Tests that more expensive transactions push out cheap ones from the pool, but +// without producing instability by creating gaps that start jumping transactions +// back and forth between queued/pending. 
+func TestStableUnderpricingNoAuctioneer(t *testing.T) { + t.Parallel() + + // Create the pool to test the pricing enforcement with + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + + config := testTxPoolConfig + config.GlobalSlots = 128 + config.GlobalQueue = 0 + + pool := New(config, blockchain, false) + pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() + + // Keep track of transaction events to ensure all executables get announced + events := make(chan core.NewTxsEvent, 32) + sub := pool.txFeed.Subscribe(events) + defer sub.Unsubscribe() + + // Create a number of test accounts and fund them + keys := make([]*ecdsa.PrivateKey, 2) + for i := 0; i < len(keys); i++ { + keys[i], _ = crypto.GenerateKey() + testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000)) + } + // Fill up the entire queue with the same transaction price points + txs := types.Transactions{} + for i := uint64(0); i < config.GlobalSlots; i++ { + txs = append(txs, pricedTransaction(i, 100000, big.NewInt(1), keys[0])) + } + pool.addRemotesSync(txs) + + pending, queued := pool.Stats() + if pending != int(config.GlobalSlots) { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, config.GlobalSlots) + } + if queued != 0 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) + } + if err := validateEvents(events, int(config.GlobalSlots)); err != nil { + t.Fatalf("original event firing failed: %v", err) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + // Ensure that adding high priced transactions drops a cheap, but doesn't produce a gap + if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(3), keys[1])); err != nil { + t.Fatalf("failed to add well priced 
transaction: %v", err) + } + pending, queued = pool.Stats() + if pending != int(config.GlobalSlots) { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, config.GlobalSlots) + } + if queued != 0 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) + } + if err := validateEvents(events, 1); err != nil { + t.Fatalf("additional event firing failed: %v", err) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } +} + +// Tests that when the pool reaches its global transaction limit, underpriced +// transactions (legacy & dynamic fee) are gradually shifted out for more +// expensive ones and any gapped pending transactions are moved into the queue. +// +// Note, local transactions are never allowed to be dropped. +func TestUnderpricingDynamicFeeNoAuctioneer(t *testing.T) { + t.Parallel() + + pool, _ := setupPoolWithConfig(eip1559Config, false) + defer pool.Close() + + pool.config.GlobalSlots = 2 + pool.config.GlobalQueue = 2 + + // Keep track of transaction events to ensure all executables get announced + events := make(chan core.NewTxsEvent, 32) + sub := pool.txFeed.Subscribe(events) + defer sub.Unsubscribe() + + // Create a number of test accounts and fund them + keys := make([]*ecdsa.PrivateKey, 4) + for i := 0; i < len(keys); i++ { + keys[i], _ = crypto.GenerateKey() + testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000)) + } + + // Generate and queue a batch of transactions, both pending and queued + txs := types.Transactions{} + + txs = append(txs, dynamicFeeTx(0, 100000, big.NewInt(3), big.NewInt(2), keys[0])) + txs = append(txs, pricedTransaction(1, 100000, big.NewInt(2), keys[0])) + txs = append(txs, dynamicFeeTx(1, 100000, big.NewInt(2), big.NewInt(1), keys[1])) + + ltx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[2]) + + // Import the batch and that both pending and queued transactions match up + 
pool.addRemotes(txs) // Pend K0:0, K0:1; Que K1:1 + pool.addLocal(ltx) // +K2:0 => Pend K0:0, K0:1, K2:0; Que K1:1 + + pending, queued := pool.Stats() + if pending != 3 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3) + } + if queued != 1 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) + } + if err := validateEvents(events, 3); err != nil { + t.Fatalf("original event firing failed: %v", err) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + + // Ensure that adding an underpriced transaction fails + tx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[1]) + if err := pool.addRemote(tx); !errors.Is(err, txpool.ErrUnderpriced) { // Pend K0:0, K0:1, K2:0; Que K1:1 + t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced) + } + + // Ensure that adding high priced transactions drops cheap ones, but not own + tx = pricedTransaction(0, 100000, big.NewInt(2), keys[1]) + if err := pool.addRemote(tx); err != nil { // +K1:0, -K1:1 => Pend K0:0, K0:1, K1:0, K2:0; Que - + t.Fatalf("failed to add well priced transaction: %v", err) + } + + tx = pricedTransaction(1, 100000, big.NewInt(3), keys[1]) + if err := pool.addRemoteSync(tx); err != nil { // +K1:2, -K0:1 => Pend K0:0 K1:0, K2:0; Que K1:2 + t.Fatalf("failed to add well priced transaction: %v", err) + } + tx = dynamicFeeTx(2, 100000, big.NewInt(4), big.NewInt(1), keys[1]) + if err := pool.addRemoteSync(tx); err != nil { // +K1:3, -K1:0 => Pend K0:0 K2:0; Que K1:2 K1:3 + t.Fatalf("failed to add well priced transaction: %v", err) + } + pending, queued = pool.Stats() + if pending != 2 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) + } + if queued != 2 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) + } + if err := validateEvents(events, 2); err != nil { + t.Fatalf("additional event 
firing failed: %v", err) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + // Ensure that adding local transactions can push out even higher priced ones + ltx = dynamicFeeTx(1, 100000, big.NewInt(0), big.NewInt(0), keys[2]) + if err := pool.addLocal(ltx); err != nil { + t.Fatalf("failed to append underpriced local transaction: %v", err) + } + ltx = dynamicFeeTx(0, 100000, big.NewInt(0), big.NewInt(0), keys[3]) + if err := pool.addLocal(ltx); err != nil { + t.Fatalf("failed to add new underpriced local transaction: %v", err) + } + pending, queued = pool.Stats() + if pending != 3 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3) + } + if queued != 1 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) + } + if err := validateEvents(events, 2); err != nil { + t.Fatalf("local event firing failed: %v", err) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } +} + +// Tests whether highest fee cap transaction is retained after a batch of high effective +// tip transactions are added and vice versa +func TestDualHeapEvictionNoAuctioneer(t *testing.T) { + t.Parallel() + + pool, _ := setupPoolWithConfig(eip1559Config, false) + defer pool.Close() + + pool.config.GlobalSlots = 10 + pool.config.GlobalQueue = 10 + + var ( + highTip, highCap *types.Transaction + baseFee int + ) + + check := func(tx *types.Transaction, name string) { + if pool.all.GetRemote(tx.Hash()) == nil { + t.Fatalf("highest %s transaction evicted from the pool", name) + } + } + + add := func(urgent bool) { + for i := 0; i < 20; i++ { + var tx *types.Transaction + // Create a test accounts and fund it + key, _ := crypto.GenerateKey() + testAddBalance(pool, crypto.PubkeyToAddress(key.PublicKey), big.NewInt(1000000000000)) + if urgent { + tx = dynamicFeeTx(0, 100000, big.NewInt(int64(baseFee+1+i)), big.NewInt(int64(1+i)), key) + 
highTip = tx + } else { + tx = dynamicFeeTx(0, 100000, big.NewInt(int64(baseFee+200+i)), big.NewInt(1), key) + highCap = tx + } + pool.addRemotesSync([]*types.Transaction{tx}) + } + pending, queued := pool.Stats() + if pending+queued != 20 { + t.Fatalf("transaction count mismatch: have %d, want %d", pending+queued, 20) + } + } + + add(false) + for baseFee = 0; baseFee <= 1000; baseFee += 100 { + pool.priced.SetBaseFee(big.NewInt(int64(baseFee))) + add(true) + check(highCap, "fee cap") + add(false) + check(highTip, "effective tip") + } + + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } +} + +// Tests that the pool rejects duplicate transactions. +func TestDeduplicationNoAuctioneer(t *testing.T) { + t.Parallel() + + // Create the pool to test the pricing enforcement with + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + + pool := New(testTxPoolConfig, blockchain, false) + pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() + + // Create a test account to add transactions with + key, _ := crypto.GenerateKey() + testAddBalance(pool, crypto.PubkeyToAddress(key.PublicKey), big.NewInt(1000000000)) + + // Create a batch of transactions and add a few of them + txs := make([]*types.Transaction, 16) + for i := 0; i < len(txs); i++ { + txs[i] = pricedTransaction(uint64(i), 100000, big.NewInt(1), key) + } + var firsts []*types.Transaction + for i := 0; i < len(txs); i += 2 { + firsts = append(firsts, txs[i]) + } + errs := pool.addRemotesSync(firsts) + if len(errs) != len(firsts) { + t.Fatalf("first add mismatching result count: have %d, want %d", len(errs), len(firsts)) + } + for i, err := range errs { + if err != nil { + t.Errorf("add %d failed: %v", i, err) + } + } + pending, queued := pool.Stats() + if pending != 1 {
+ t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 1) + } + if queued != len(txs)/2-1 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, len(txs)/2-1) + } + // Try to add all of them now and ensure previous ones error out as knowns + errs = pool.addRemotesSync(txs) + if len(errs) != len(txs) { + t.Fatalf("all add mismatching result count: have %d, want %d", len(errs), len(txs)) + } + for i, err := range errs { + if i%2 == 0 && err == nil { + t.Errorf("add %d succeeded, should have failed as known", i) + } + if i%2 == 1 && err != nil { + t.Errorf("add %d failed: %v", i, err) + } + } + pending, queued = pool.Stats() + if pending != len(txs) { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, len(txs)) + } + if queued != 0 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } +} + +// Tests that the pool rejects replacement transactions that don't meet the minimum +// price bump required. 
+func TestReplacementNoAuctioneer(t *testing.T) { + t.Parallel() + + // Create the pool to test the pricing enforcement with + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + + pool := New(testTxPoolConfig, blockchain, false) + pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() + + // Keep track of transaction events to ensure all executables get announced + events := make(chan core.NewTxsEvent, 32) + sub := pool.txFeed.Subscribe(events) + defer sub.Unsubscribe() + + // Create a test account to add transactions with + key, _ := crypto.GenerateKey() + testAddBalance(pool, crypto.PubkeyToAddress(key.PublicKey), big.NewInt(1000000000)) + + // Add pending transactions, ensuring the minimum price bump is enforced for replacement (for ultra low prices too) + price := int64(100) + threshold := (price * (100 + int64(testTxPoolConfig.PriceBump))) / 100 + + if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), key)); err != nil { + t.Fatalf("failed to add original cheap pending transaction: %v", err) + } + if err := pool.addRemote(pricedTransaction(0, 100001, big.NewInt(1), key)); err != txpool.ErrReplaceUnderpriced { + t.Fatalf("original cheap pending transaction replacement error mismatch: have %v, want %v", err, txpool.ErrReplaceUnderpriced) + } + if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(2), key)); err != nil { + t.Fatalf("failed to replace original cheap pending transaction: %v", err) + } + if err := validateEvents(events, 2); err != nil { + t.Fatalf("cheap replacement event firing failed: %v", err) + } + + if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(price), key)); err != nil { + t.Fatalf("failed to add original proper pending transaction: %v", err) + } + if err := pool.addRemote(pricedTransaction(0, 
100001, big.NewInt(threshold-1), key)); err != txpool.ErrReplaceUnderpriced { + t.Fatalf("original proper pending transaction replacement error mismatch: have %v, want %v", err, txpool.ErrReplaceUnderpriced) + } + if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(threshold), key)); err != nil { + t.Fatalf("failed to replace original proper pending transaction: %v", err) + } + if err := validateEvents(events, 2); err != nil { + t.Fatalf("proper replacement event firing failed: %v", err) + } + + // Add queued transactions, ensuring the minimum price bump is enforced for replacement (for ultra low prices too) + if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(1), key)); err != nil { + t.Fatalf("failed to add original cheap queued transaction: %v", err) + } + if err := pool.addRemote(pricedTransaction(2, 100001, big.NewInt(1), key)); err != txpool.ErrReplaceUnderpriced { + t.Fatalf("original cheap queued transaction replacement error mismatch: have %v, want %v", err, txpool.ErrReplaceUnderpriced) + } + if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(2), key)); err != nil { + t.Fatalf("failed to replace original cheap queued transaction: %v", err) + } + + if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(price), key)); err != nil { + t.Fatalf("failed to add original proper queued transaction: %v", err) + } + if err := pool.addRemote(pricedTransaction(2, 100001, big.NewInt(threshold-1), key)); err != txpool.ErrReplaceUnderpriced { + t.Fatalf("original proper queued transaction replacement error mismatch: have %v, want %v", err, txpool.ErrReplaceUnderpriced) + } + if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(threshold), key)); err != nil { + t.Fatalf("failed to replace original proper queued transaction: %v", err) + } + + if err := validateEvents(events, 0); err != nil { + t.Fatalf("queued replacement event firing failed: %v", err) + } + if err := validatePoolInternals(pool); err != nil { + 
t.Fatalf("pool internal state corrupted: %v", err) + } +} + +// Tests that the pool rejects replacement dynamic fee transactions that don't +// meet the minimum price bump required. +func TestReplacementDynamicFeeNoAuctioneer(t *testing.T) { + t.Parallel() + + // Create the pool to test the pricing enforcement with + pool, key := setupPoolWithConfig(eip1559Config, false) + defer pool.Close() + testAddBalance(pool, crypto.PubkeyToAddress(key.PublicKey), big.NewInt(1000000000)) + + // Keep track of transaction events to ensure all executables get announced + events := make(chan core.NewTxsEvent, 32) + sub := pool.txFeed.Subscribe(events) + defer sub.Unsubscribe() + + // Add pending transactions, ensuring the minimum price bump is enforced for replacement (for ultra low prices too) + gasFeeCap := int64(100) + feeCapThreshold := (gasFeeCap * (100 + int64(testTxPoolConfig.PriceBump))) / 100 + gasTipCap := int64(60) + tipThreshold := (gasTipCap * (100 + int64(testTxPoolConfig.PriceBump))) / 100 + + // Run the following identical checks for both the pending and queue pools: + // 1. Send initial tx => accept + // 2. Don't bump tip or fee cap => discard + // 3. Bump both more than min => accept + // 4. Check events match expected (2 new executable txs during pending, 0 during queue) + // 5. Send new tx with larger tip and gasFeeCap => accept + // 6. Bump tip max allowed so it's still underpriced => discard + // 7. Bump fee cap max allowed so it's still underpriced => discard + // 8. Bump tip min for acceptance => discard + // 9. Bump feecap min for acceptance => discard + // 10. Bump feecap and tip min for acceptance => accept + // 11. Check events match expected (2 new executable txs during pending, 0 during queue) + stages := []string{"pending", "queued"} + for _, stage := range stages { + // Since state is empty, 0 nonce txs are "executable" and can go + // into pending immediately. 
2 nonce txs are "gapped" + nonce := uint64(0) + if stage == "queued" { + nonce = 2 + } + + // 1. Send initial tx => accept + tx := dynamicFeeTx(nonce, 100000, big.NewInt(2), big.NewInt(1), key) + if err := pool.addRemoteSync(tx); err != nil { + t.Fatalf("failed to add original cheap %s transaction: %v", stage, err) + } + // 2. Don't bump tip or feecap => discard + tx = dynamicFeeTx(nonce, 100001, big.NewInt(2), big.NewInt(1), key) + if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced { + t.Fatalf("original cheap %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced) + } + // 3. Bump both more than min => accept + tx = dynamicFeeTx(nonce, 100000, big.NewInt(3), big.NewInt(2), key) + if err := pool.addRemote(tx); err != nil { + t.Fatalf("failed to replace original cheap %s transaction: %v", stage, err) + } + // 4. Check events match expected (2 new executable txs during pending, 0 during queue) + count := 2 + if stage == "queued" { + count = 0 + } + if err := validateEvents(events, count); err != nil { + t.Fatalf("cheap %s replacement event firing failed: %v", stage, err) + } + // 5. Send new tx with larger tip and feeCap => accept + tx = dynamicFeeTx(nonce, 100000, big.NewInt(gasFeeCap), big.NewInt(gasTipCap), key) + if err := pool.addRemoteSync(tx); err != nil { + t.Fatalf("failed to add original proper %s transaction: %v", stage, err) + } + // 6. Bump tip max allowed so it's still underpriced => discard + tx = dynamicFeeTx(nonce, 100000, big.NewInt(gasFeeCap), big.NewInt(tipThreshold-1), key) + if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced { + t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced) + } + // 7. 
Bump fee cap max allowed so it's still underpriced => discard + tx = dynamicFeeTx(nonce, 100000, big.NewInt(feeCapThreshold-1), big.NewInt(gasTipCap), key) + if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced { + t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced) + } + // 8. Bump tip min for acceptance => discard + tx = dynamicFeeTx(nonce, 100000, big.NewInt(gasFeeCap), big.NewInt(tipThreshold), key) + if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced { + t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced) + } + // 9. Bump fee cap min for acceptance => discard + tx = dynamicFeeTx(nonce, 100000, big.NewInt(feeCapThreshold), big.NewInt(gasTipCap), key) + if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced { + t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced) + } + // 10. Bump feecap and tip min for acceptance => accept + tx = dynamicFeeTx(nonce, 100000, big.NewInt(feeCapThreshold), big.NewInt(tipThreshold), key) + if err := pool.addRemote(tx); err != nil { + t.Fatalf("failed to replace original cheap %s transaction: %v", stage, err) + } + // 11. Check events match expected (2 new executable txs during pending, 0 during queue) + count = 2 + if stage == "queued" { + count = 0 + } + if err := validateEvents(events, count); err != nil { + t.Fatalf("replacement %s event firing failed: %v", stage, err) + } + } + + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } +} + +// Tests that local transactions are journaled to disk, but remote transactions +// get discarded between restarts.
+func TestJournalingNoAuctioneer(t *testing.T) { testJournalingNoAuctioneer(t, false) } +func TestJournalingNoLocalsNoAuctioneer(t *testing.T) { testJournalingNoAuctioneer(t, true) } + +func testJournalingNoAuctioneer(t *testing.T, nolocals bool) { + t.Parallel() + + // Create a temporary file for the journal + file, err := os.CreateTemp("", "") + if err != nil { + t.Fatalf("failed to create temporary journal: %v", err) + } + journal := file.Name() + defer os.Remove(journal) + + // Clean up the temporary file, we only need the path for now + file.Close() + os.Remove(journal) + + // Create the original pool to inject transaction into the journal + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + + config := testTxPoolConfig + config.NoLocals = nolocals + config.Journal = journal + config.Rejournal = time.Second + + pool := New(config, blockchain, false) + pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) + + // Create two test accounts to ensure remotes expire but locals do not + local, _ := crypto.GenerateKey() + remote, _ := crypto.GenerateKey() + + testAddBalance(pool, crypto.PubkeyToAddress(local.PublicKey), big.NewInt(1000000000)) + testAddBalance(pool, crypto.PubkeyToAddress(remote.PublicKey), big.NewInt(1000000000)) + + // Add three local and a remote transactions and ensure they are queued up + if err := pool.addLocal(pricedTransaction(0, 100000, big.NewInt(1), local)); err != nil { + t.Fatalf("failed to add local transaction: %v", err) + } + if err := pool.addLocal(pricedTransaction(1, 100000, big.NewInt(1), local)); err != nil { + t.Fatalf("failed to add local transaction: %v", err) + } + if err := pool.addLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil { + t.Fatalf("failed to add local transaction: %v", err) + } + if err := pool.addRemoteSync(pricedTransaction(0, 
100000, big.NewInt(1), remote)); err != nil { + t.Fatalf("failed to add remote transaction: %v", err) + } + pending, queued := pool.Stats() + if pending != 4 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 4) + } + if queued != 0 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + // Terminate the old pool, bump the local nonce, create a new pool and ensure relevant transaction survive + pool.Close() + statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1) + blockchain = newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + + pool = New(config, blockchain, false) + pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) + + pending, queued = pool.Stats() + if queued != 0 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) + } + if nolocals { + if pending != 0 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0) + } + } else { + if pending != 2 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) + } + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + // Bump the nonce temporarily and ensure the newly invalidated transaction is removed + statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 2) + <-pool.requestReset(nil, nil) + time.Sleep(2 * config.Rejournal) + pool.Close() + + statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1) + blockchain = newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + pool = New(config, blockchain, false) + pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) + + pending, queued = pool.Stats() + if pending != 0 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0) + } + if nolocals { + if queued 
!= 0 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) + } + } else { + if queued != 1 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) + } + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + pool.Close() +} + +// TestStatusCheck tests that the pool can correctly retrieve the +// pending status of individual transactions. +func TestStatusCheckNoAuctioneer(t *testing.T) { + t.Parallel() + + // Create the pool to test the status retrievals with + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + + pool := New(testTxPoolConfig, blockchain, false) + pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() + + // Create the test accounts to check various transaction statuses with + keys := make([]*ecdsa.PrivateKey, 3) + for i := 0; i < len(keys); i++ { + keys[i], _ = crypto.GenerateKey() + testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000)) + } + // Generate and queue a batch of transactions, both pending and queued + txs := types.Transactions{} + + txs = append(txs, pricedTransaction(0, 100000, big.NewInt(1), keys[0])) // Pending only + txs = append(txs, pricedTransaction(0, 100000, big.NewInt(1), keys[1])) // Pending and queued + txs = append(txs, pricedTransaction(2, 100000, big.NewInt(1), keys[1])) + txs = append(txs, pricedTransaction(2, 100000, big.NewInt(1), keys[2])) // Queued only + + // Import the transaction and ensure they are correctly added + pool.addRemotesSync(txs) + + pending, queued := pool.Stats() + if pending != 2 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) + } + if queued != 2 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) + } + if err := 
validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + // Retrieve the status of each transaction and validate them + hashes := make([]common.Hash, len(txs)) + for i, tx := range txs { + hashes[i] = tx.Hash() + } + hashes = append(hashes, common.Hash{}) + expect := []txpool.TxStatus{txpool.TxStatusPending, txpool.TxStatusPending, txpool.TxStatusQueued, txpool.TxStatusQueued, txpool.TxStatusUnknown} + + for i := 0; i < len(hashes); i++ { + if status := pool.Status(hashes[i]); status != expect[i] { + t.Errorf("transaction %d: status mismatch: have %v, want %v", i, status, expect[i]) + } + } +} + +// Test the transaction slots consumption is computed correctly +func TestSlotCountNoAuctioneer(t *testing.T) { + t.Parallel() + + key, _ := crypto.GenerateKey() + + // Check that an empty transaction consumes a single slot + smallTx := pricedDataTransaction(0, 0, big.NewInt(0), key, 0) + if slots := numSlots(smallTx); slots != 1 { + t.Fatalf("small transactions slot count mismatch: have %d want %d", slots, 1) + } + // Check that a large transaction consumes the correct number of slots + bigTx := pricedDataTransaction(0, 0, big.NewInt(0), key, uint64(10*txSlotSize)) + if slots := numSlots(bigTx); slots != 11 { + t.Fatalf("big transactions slot count mismatch: have %d want %d", slots, 11) + } +} + +// Benchmarks the speed of validating the contents of the pending queue of the +// transaction pool. 
+func BenchmarkPendingDemotion100NoAuctioneer(b *testing.B) { + benchmarkPendingDemotionNoAuctioneer(b, 100) +} +func BenchmarkPendingDemotion1000NoAuctioneer(b *testing.B) { + benchmarkPendingDemotionNoAuctioneer(b, 1000) +} +func BenchmarkPendingDemotion10000NoAuctioneer(b *testing.B) { + benchmarkPendingDemotionNoAuctioneer(b, 10000) +} + +func benchmarkPendingDemotionNoAuctioneer(b *testing.B, size int) { + // Add a batch of transactions to a pool one by one + pool, key := setupPool(false) + defer pool.Close() + + account := crypto.PubkeyToAddress(key.PublicKey) + testAddBalance(pool, account, big.NewInt(1000000)) + + for i := 0; i < size; i++ { + tx := transaction(uint64(i), 100000, key) + pool.promoteTx(account, tx.Hash(), tx) + } + // Benchmark the speed of pool validation + b.ResetTimer() + for i := 0; i < b.N; i++ { + pool.demoteUnexecutables() + } +} + +// Benchmarks the speed of scheduling the contents of the future queue of the +// transaction pool. +func BenchmarkFuturePromotion100NoAuctioneer(b *testing.B) { + benchmarkFuturePromotionNoAuctioneer(b, 100) +} +func BenchmarkFuturePromotion1000NoAuctioneer(b *testing.B) { + benchmarkFuturePromotionNoAuctioneer(b, 1000) +} +func BenchmarkFuturePromotion10000NoAuctioneer(b *testing.B) { + benchmarkFuturePromotionNoAuctioneer(b, 10000) +} + +func benchmarkFuturePromotionNoAuctioneer(b *testing.B, size int) { + // Add a batch of transactions to a pool one by one + pool, key := setupPool(false) + defer pool.Close() + + account := crypto.PubkeyToAddress(key.PublicKey) + testAddBalance(pool, account, big.NewInt(1000000)) + + for i := 0; i < size; i++ { + tx := transaction(uint64(1+i), 100000, key) + pool.enqueueTx(tx.Hash(), tx, false, true) + } + // Benchmark the speed of pool validation + b.ResetTimer() + for i := 0; i < b.N; i++ { + pool.promoteExecutables(nil) + } +} + +// Benchmarks the speed of batched transaction insertion. 
+func BenchmarkBatchInsert100NoAuctioneer(b *testing.B) { + benchmarkBatchInsertNoAuctioneer(b, 100, false) +} +func BenchmarkBatchInsert1000NoAuctioneer(b *testing.B) { + benchmarkBatchInsertNoAuctioneer(b, 1000, false) +} +func BenchmarkBatchInsert10000NoAuctioneer(b *testing.B) { + benchmarkBatchInsertNoAuctioneer(b, 10000, false) +} + +func BenchmarkBatchLocalInsert100NoAuctioneer(b *testing.B) { + benchmarkBatchInsertNoAuctioneer(b, 100, true) +} +func BenchmarkBatchLocalInsert1000NoAuctioneer(b *testing.B) { + benchmarkBatchInsertNoAuctioneer(b, 1000, true) +} +func BenchmarkBatchLocalInsert10000NoAuctioneer(b *testing.B) { + benchmarkBatchInsertNoAuctioneer(b, 10000, true) +} + +func benchmarkBatchInsertNoAuctioneer(b *testing.B, size int, local bool) { + // Generate a batch of transactions to enqueue into the pool + pool, key := setupPool(false) + defer pool.Close() + + account := crypto.PubkeyToAddress(key.PublicKey) + testAddBalance(pool, account, big.NewInt(1000000000000000000)) + + batches := make([]types.Transactions, b.N) + for i := 0; i < b.N; i++ { + batches[i] = make(types.Transactions, size) + for j := 0; j < size; j++ { + batches[i][j] = transaction(uint64(size*i+j), 100000, key) + } + } + // Benchmark importing the transactions into the queue + b.ResetTimer() + for _, batch := range batches { + if local { + pool.addLocals(batch) + } else { + pool.addRemotes(batch) + } + } +} + +func BenchmarkInsertRemoteWithAllLocalsNoAuctioneer(b *testing.B) { + // Allocate keys for testing + key, _ := crypto.GenerateKey() + account := crypto.PubkeyToAddress(key.PublicKey) + + remoteKey, _ := crypto.GenerateKey() + remoteAddr := crypto.PubkeyToAddress(remoteKey.PublicKey) + + locals := make([]*types.Transaction, 4096+1024) // Occupy all slots + for i := 0; i < len(locals); i++ { + locals[i] = transaction(uint64(i), 100000, key) + } + remotes := make([]*types.Transaction, 1000) + for i := 0; i < len(remotes); i++ { + remotes[i] = pricedTransaction(uint64(i), 
100000, big.NewInt(2), remoteKey) // Higher gasprice + } + // Benchmark importing the transactions into the queue + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + pool, _ := setupPool(false) + testAddBalance(pool, account, big.NewInt(100000000)) + for _, local := range locals { + pool.addLocal(local) + } + b.StartTimer() + // Assign a high enough balance for testing + testAddBalance(pool, remoteAddr, big.NewInt(100000000)) + for i := 0; i < len(remotes); i++ { + pool.addRemotes([]*types.Transaction{remotes[i]}) + } + pool.Close() + } +} + +// Benchmarks the speed of batch transaction insertion in case of multiple accounts. +func BenchmarkMultiAccountBatchInsertNoAuctioneer(b *testing.B) { + // Generate a batch of transactions to enqueue into the pool + pool, _ := setupPool(false) + defer pool.Close() + b.ReportAllocs() + batches := make(types.Transactions, b.N) + for i := 0; i < b.N; i++ { + key, _ := crypto.GenerateKey() + account := crypto.PubkeyToAddress(key.PublicKey) + pool.currentState.AddBalance(account, uint256.NewInt(1000000), tracing.BalanceChangeUnspecified) + tx := transaction(uint64(0), 100000, key) + batches[i] = tx + } + // Benchmark importing the transactions into the queue + b.ResetTimer() + for _, tx := range batches { + pool.addRemotesSync([]*types.Transaction{tx}) + } +} diff --git a/core/txpool/legacypool/legacypool_test.go b/core/txpool/legacypool/legacypool_test.go index aa9fe6c92..a1395bebc 100644 --- a/core/txpool/legacypool/legacypool_test.go +++ b/core/txpool/legacypool/legacypool_test.go @@ -159,16 +159,16 @@ func makeAddressReserver() txpool.AddressReserver { } } -func setupPool() (*LegacyPool, *ecdsa.PrivateKey) { - return setupPoolWithConfig(params.TestChainConfig) +func setupPool(auctioneerEnabled bool) (*LegacyPool, *ecdsa.PrivateKey) { + return setupPoolWithConfig(params.TestChainConfig, auctioneerEnabled) } -func setupPoolWithConfig(config *params.ChainConfig) (*LegacyPool, *ecdsa.PrivateKey) { +func 
setupPoolWithConfig(config *params.ChainConfig, auctioneerEnabled bool) (*LegacyPool, *ecdsa.PrivateKey) { statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) blockchain := newTestBlockChain(config, 10000000, statedb, new(event.Feed)) key, _ := crypto.GenerateKey() - pool := New(testTxPoolConfig, blockchain) + pool := New(testTxPoolConfig, blockchain, auctioneerEnabled) if err := pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()); err != nil { panic(err) } @@ -284,7 +284,7 @@ func TestStateChangeDuringReset(t *testing.T) { tx0 := transaction(0, 100000, key) tx1 := transaction(1, 100000, key) - pool := New(testTxPoolConfig, blockchain) + pool := New(testTxPoolConfig, blockchain, true) pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() @@ -341,7 +341,7 @@ func testSetNonce(pool *LegacyPool, addr common.Address, nonce uint64) { func TestInvalidTransactions(t *testing.T) { t.Parallel() - pool, key := setupPool() + pool, key := setupPool(true) defer pool.Close() tx := transaction(0, 100, key) @@ -379,7 +379,7 @@ func TestInvalidTransactions(t *testing.T) { func TestQueue(t *testing.T) { t.Parallel() - pool, key := setupPool() + pool, key := setupPool(true) defer pool.Close() tx := transaction(0, 100, key) @@ -410,7 +410,7 @@ func TestQueue(t *testing.T) { func TestQueue2(t *testing.T) { t.Parallel() - pool, key := setupPool() + pool, key := setupPool(true) defer pool.Close() tx1 := transaction(0, 100, key) @@ -436,7 +436,7 @@ func TestQueue2(t *testing.T) { func TestNegativeValue(t *testing.T) { t.Parallel() - pool, key := setupPool() + pool, key := setupPool(true) defer pool.Close() tx, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(-1), 100, big.NewInt(1), nil), types.HomesteadSigner{}, key) @@ -450,7 +450,7 @@ func TestNegativeValue(t *testing.T) { func TestTipAboveFeeCap(t *testing.T) { t.Parallel() - 
pool, key := setupPoolWithConfig(eip1559Config) + pool, key := setupPoolWithConfig(eip1559Config, true) defer pool.Close() tx := dynamicFeeTx(0, 100, big.NewInt(1), big.NewInt(2), key) @@ -463,7 +463,7 @@ func TestTipAboveFeeCap(t *testing.T) { func TestVeryHighValues(t *testing.T) { t.Parallel() - pool, key := setupPoolWithConfig(eip1559Config) + pool, key := setupPoolWithConfig(eip1559Config, true) defer pool.Close() veryBigNumber := big.NewInt(1) @@ -483,7 +483,7 @@ func TestVeryHighValues(t *testing.T) { func TestChainFork(t *testing.T) { t.Parallel() - pool, key := setupPool() + pool, key := setupPool(true) defer pool.Close() addr := crypto.PubkeyToAddress(key.PublicKey) @@ -512,7 +512,7 @@ func TestChainFork(t *testing.T) { func TestRemoveTxSanity(t *testing.T) { t.Parallel() - pool, key := setupPool() + pool, key := setupPool(true) defer pool.Close() addr := crypto.PubkeyToAddress(key.PublicKey) @@ -573,7 +573,7 @@ func TestRemoveTxSanity(t *testing.T) { func TestDoubleNonce(t *testing.T) { t.Parallel() - pool, key := setupPool() + pool, key := setupPool(true) defer pool.Close() addr := crypto.PubkeyToAddress(key.PublicKey) @@ -624,7 +624,7 @@ func TestDoubleNonce(t *testing.T) { func TestMissingNonce(t *testing.T) { t.Parallel() - pool, key := setupPool() + pool, key := setupPool(true) defer pool.Close() addr := crypto.PubkeyToAddress(key.PublicKey) @@ -648,7 +648,7 @@ func TestNonceRecovery(t *testing.T) { t.Parallel() const n = 10 - pool, key := setupPool() + pool, key := setupPool(true) defer pool.Close() addr := crypto.PubkeyToAddress(key.PublicKey) @@ -674,7 +674,7 @@ func TestDropping(t *testing.T) { t.Parallel() // Create a test account and fund it - pool, key := setupPool() + pool, key := setupPool(true) defer pool.Close() account := crypto.PubkeyToAddress(key.PublicKey) @@ -689,21 +689,15 @@ func TestDropping(t *testing.T) { tx11 = transaction(11, 200, key) tx12 = transaction(12, 300, key) ) - pool.all.Add(tx0, false) - pool.priced.Put(tx0, false) 
- pool.promoteTx(account, tx0.Hash(), tx0) - pool.all.Add(tx1, false) - pool.priced.Put(tx1, false) - pool.promoteTx(account, tx1.Hash(), tx1) + pool.add(tx0, false) + pool.add(tx1, false) + pool.add(tx2, false) + pool.add(tx10, false) + pool.add(tx11, false) + pool.add(tx12, false) - pool.all.Add(tx2, false) - pool.priced.Put(tx2, false) - pool.promoteTx(account, tx2.Hash(), tx2) - - pool.enqueueTx(tx10.Hash(), tx10, false, true) - pool.enqueueTx(tx11.Hash(), tx11, false, true) - pool.enqueueTx(tx12.Hash(), tx12, false, true) + pool.promoteExecutables([]common.Address{account}) // Check that pre and post validations leave the pool as is if pool.pending[account].Len() != 3 { @@ -756,7 +750,7 @@ func TestPostponing(t *testing.T) { statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) - pool := New(testTxPoolConfig, blockchain) + pool := New(testTxPoolConfig, blockchain, true) pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() @@ -837,7 +831,7 @@ func TestGapFilling(t *testing.T) { t.Parallel() // Create a test account and fund it - pool, key := setupPool() + pool, key := setupPool(true) defer pool.Close() account := crypto.PubkeyToAddress(key.PublicKey) @@ -891,7 +885,7 @@ func TestQueueAccountLimiting(t *testing.T) { t.Parallel() // Create a test account and fund it - pool, key := setupPool() + pool, key := setupPool(true) defer pool.Close() account := crypto.PubkeyToAddress(key.PublicKey) @@ -920,96 +914,6 @@ func TestQueueAccountLimiting(t *testing.T) { } } -// Tests that if the transaction count belonging to multiple accounts go above -// some threshold, the higher transactions are dropped to prevent DOS attacks. -// -// This logic should not hold for local transactions, unless the local tracking -// mechanism is disabled. 
-func TestQueueGlobalLimiting(t *testing.T) { - testQueueGlobalLimiting(t, false) -} -func TestQueueGlobalLimitingNoLocals(t *testing.T) { - testQueueGlobalLimiting(t, true) -} - -func testQueueGlobalLimiting(t *testing.T, nolocals bool) { - t.Parallel() - - // Create the pool to test the limit enforcement with - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) - - config := testTxPoolConfig - config.NoLocals = nolocals - config.GlobalQueue = config.AccountQueue*3 - 1 // reduce the queue limits to shorten test time (-1 to make it non divisible) - - pool := New(config, blockchain) - pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) - defer pool.Close() - - // Create a number of test accounts and fund them (last one will be the local) - keys := make([]*ecdsa.PrivateKey, 5) - for i := 0; i < len(keys); i++ { - keys[i], _ = crypto.GenerateKey() - testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000)) - } - local := keys[len(keys)-1] - - // Generate and queue a batch of transactions - nonces := make(map[common.Address]uint64) - - txs := make(types.Transactions, 0, 3*config.GlobalQueue) - for len(txs) < cap(txs) { - key := keys[rand.Intn(len(keys)-1)] // skip adding transactions with the local account - addr := crypto.PubkeyToAddress(key.PublicKey) - - txs = append(txs, transaction(nonces[addr]+1, 100000, key)) - nonces[addr]++ - } - // Import the batch and verify that limits have been enforced - pool.addRemotesSync(txs) - - queued := 0 - for addr, list := range pool.queue { - if list.Len() > int(config.AccountQueue) { - t.Errorf("addr %x: queued accounts overflown allowance: %d > %d", addr, list.Len(), config.AccountQueue) - } - queued += list.Len() - } - if queued > int(config.GlobalQueue) { - t.Fatalf("total transactions overflow allowance: %d > %d", queued, 
config.GlobalQueue) - } - // Generate a batch of transactions from the local account and import them - txs = txs[:0] - for i := uint64(0); i < 3*config.GlobalQueue; i++ { - txs = append(txs, transaction(i+1, 100000, local)) - } - pool.addLocals(txs) - - // If locals are disabled, the previous eviction algorithm should apply here too - if nolocals { - queued := 0 - for addr, list := range pool.queue { - if list.Len() > int(config.AccountQueue) { - t.Errorf("addr %x: queued accounts overflown allowance: %d > %d", addr, list.Len(), config.AccountQueue) - } - queued += list.Len() - } - if queued > int(config.GlobalQueue) { - t.Fatalf("total transactions overflow allowance: %d > %d", queued, config.GlobalQueue) - } - } else { - // Local exemptions are enabled, make sure the local account owned the queue - if len(pool.queue) != 1 { - t.Errorf("multiple accounts in queue: have %v, want %v", len(pool.queue), 1) - } - // Also ensure no local transactions are ever dropped, even if above global limits - if queued := pool.queue[crypto.PubkeyToAddress(local.PublicKey)].Len(); uint64(queued) != 3*config.GlobalQueue { - t.Fatalf("local account queued transaction count mismatch: have %v, want %v", queued, 3*config.GlobalQueue) - } - } -} - // Tests that if an account remains idle for a prolonged amount of time, any // non-executable transactions queued up are dropped to prevent wasting resources // on shuffling them around. 
@@ -1037,7 +941,7 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) { config.Lifetime = time.Second config.NoLocals = nolocals - pool := New(config, blockchain) + pool := New(config, blockchain, true) pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() @@ -1142,7 +1046,7 @@ func TestPendingLimiting(t *testing.T) { t.Parallel() // Create a test account and fund it - pool, key := setupPool() + pool, key := setupPool(true) defer pool.Close() account := crypto.PubkeyToAddress(key.PublicKey) @@ -1189,7 +1093,7 @@ func TestPendingGlobalLimiting(t *testing.T) { config := testTxPoolConfig config.GlobalSlots = config.AccountSlots * 10 - pool := New(config, blockchain) + pool := New(config, blockchain, true) pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() @@ -1232,7 +1136,7 @@ func TestAllowedTxSize(t *testing.T) { t.Parallel() // Create a test account and fund it - pool, key := setupPool() + pool, key := setupPool(true) defer pool.Close() account := crypto.PubkeyToAddress(key.PublicKey) @@ -1293,7 +1197,7 @@ func TestCapClearsFromAll(t *testing.T) { config.AccountQueue = 2 config.GlobalSlots = 8 - pool := New(config, blockchain) + pool := New(config, blockchain, true) pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() @@ -1326,7 +1230,7 @@ func TestPendingMinimumAllowance(t *testing.T) { config := testTxPoolConfig config.GlobalSlots = 1 - pool := New(config, blockchain) + pool := New(config, blockchain, true) pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() @@ -1372,7 +1276,7 @@ func TestRepricing(t *testing.T) { statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) - pool := New(testTxPoolConfig, blockchain) + pool := New(testTxPoolConfig, 
blockchain, true) pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() @@ -1494,7 +1398,7 @@ func TestMinGasPriceEnforced(t *testing.T) { txPoolConfig := DefaultConfig txPoolConfig.NoLocals = true - pool := New(txPoolConfig, blockchain) + pool := New(txPoolConfig, blockchain, true) pool.Init(txPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() @@ -1538,7 +1442,7 @@ func TestRepricingDynamicFee(t *testing.T) { t.Parallel() // Create the pool to test the pricing enforcement with - pool, _ := setupPoolWithConfig(eip1559Config) + pool, _ := setupPoolWithConfig(eip1559Config, true) defer pool.Close() // Keep track of transaction events to ensure all executables get announced @@ -1665,7 +1569,7 @@ func TestRepricingKeepsLocals(t *testing.T) { statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed)) - pool := New(testTxPoolConfig, blockchain) + pool := New(testTxPoolConfig, blockchain, true) pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() @@ -1743,7 +1647,7 @@ func TestUnderpricing(t *testing.T) { config.GlobalSlots = 2 config.GlobalQueue = 2 - pool := New(config, blockchain) + pool := New(config, blockchain, true) pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() @@ -1858,7 +1762,7 @@ func TestStableUnderpricing(t *testing.T) { config.GlobalSlots = 128 config.GlobalQueue = 0 - pool := New(config, blockchain) + pool := New(config, blockchain, true) pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() @@ -1920,7 +1824,7 @@ func TestStableUnderpricing(t *testing.T) { func TestUnderpricingDynamicFee(t *testing.T) { t.Parallel() - pool, _ := setupPoolWithConfig(eip1559Config) + pool, _ := 
setupPoolWithConfig(eip1559Config, true) defer pool.Close() pool.config.GlobalSlots = 2 @@ -2027,7 +1931,7 @@ func TestUnderpricingDynamicFee(t *testing.T) { func TestDualHeapEviction(t *testing.T) { t.Parallel() - pool, _ := setupPoolWithConfig(eip1559Config) + pool, _ := setupPoolWithConfig(eip1559Config, true) defer pool.Close() pool.config.GlobalSlots = 10 @@ -2087,7 +1991,7 @@ func TestDeduplication(t *testing.T) { statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) - pool := New(testTxPoolConfig, blockchain) + pool := New(testTxPoolConfig, blockchain, true) pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() @@ -2154,7 +2058,7 @@ func TestReplacement(t *testing.T) { statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) - pool := New(testTxPoolConfig, blockchain) + pool := New(testTxPoolConfig, blockchain, true) pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() @@ -2232,7 +2136,7 @@ func TestReplacementDynamicFee(t *testing.T) { t.Parallel() // Create the pool to test the pricing enforcement with - pool, key := setupPoolWithConfig(eip1559Config) + pool, key := setupPoolWithConfig(eip1559Config, true) defer pool.Close() testAddBalance(pool, crypto.PubkeyToAddress(key.PublicKey), big.NewInt(1000000000)) @@ -2338,7 +2242,6 @@ func TestReplacementDynamicFee(t *testing.T) { // Tests that local transactions are journaled to disk, but remote transactions // get discarded between restarts. 
-// TODO - fix this func TestJournaling(t *testing.T) { testJournaling(t, false) } func TestJournalingNoLocals(t *testing.T) { testJournaling(t, true) } @@ -2366,7 +2269,7 @@ func testJournaling(t *testing.T, nolocals bool) { config.Journal = journal config.Rejournal = time.Second - pool := New(config, blockchain) + pool := New(config, blockchain, true) pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) // Create two test accounts to ensure remotes expire but locals do not @@ -2404,7 +2307,7 @@ func testJournaling(t *testing.T, nolocals bool) { statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1) blockchain = newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) - pool = New(config, blockchain) + pool = New(config, blockchain, true) pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) pending, queued = pool.Stats() @@ -2436,7 +2339,7 @@ func testJournaling(t *testing.T, nolocals bool) { statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1) blockchain = newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) - pool = New(config, blockchain) + pool = New(config, blockchain, true) pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) // tx mempool is cleared out completely after a reset @@ -2474,7 +2377,7 @@ func TestStatusCheck(t *testing.T) { statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) - pool := New(testTxPoolConfig, blockchain) + pool := New(testTxPoolConfig, blockchain, true) pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() @@ -2546,7 +2449,7 @@ func BenchmarkPendingDemotion10000(b *testing.B) { benchmarkPendingDemotion(b, 1 func benchmarkPendingDemotion(b *testing.B, size int) { // Add a batch of transactions to a pool one by one 
- pool, key := setupPool() + pool, key := setupPool(true) defer pool.Close() account := crypto.PubkeyToAddress(key.PublicKey) @@ -2571,7 +2474,7 @@ func BenchmarkFuturePromotion10000(b *testing.B) { benchmarkFuturePromotion(b, 1 func benchmarkFuturePromotion(b *testing.B, size int) { // Add a batch of transactions to a pool one by one - pool, key := setupPool() + pool, key := setupPool(true) defer pool.Close() account := crypto.PubkeyToAddress(key.PublicKey) @@ -2599,7 +2502,7 @@ func BenchmarkBatchLocalInsert10000(b *testing.B) { benchmarkBatchInsert(b, 1000 func benchmarkBatchInsert(b *testing.B, size int, local bool) { // Generate a batch of transactions to enqueue into the pool - pool, key := setupPool() + pool, key := setupPool(true) defer pool.Close() account := crypto.PubkeyToAddress(key.PublicKey) @@ -2643,7 +2546,7 @@ func BenchmarkInsertRemoteWithAllLocals(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { b.StopTimer() - pool, _ := setupPool() + pool, _ := setupPool(true) testAddBalance(pool, account, big.NewInt(100000000)) for _, local := range locals { pool.addLocal(local) @@ -2661,7 +2564,7 @@ func BenchmarkInsertRemoteWithAllLocals(b *testing.B) { // Benchmarks the speed of batch transaction insertion in case of multiple accounts. 
func BenchmarkMultiAccountBatchInsert(b *testing.B) { // Generate a batch of transactions to enqueue into the pool - pool, _ := setupPool() + pool, _ := setupPool(true) defer pool.Close() b.ReportAllocs() batches := make(types.Transactions, b.N) diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go index 883af5635..432433a96 100644 --- a/core/txpool/txpool.go +++ b/core/txpool/txpool.go @@ -78,22 +78,25 @@ type TxPool struct { term chan struct{} // Termination channel to detect a closed pool sync chan chan error // Testing / simulator channel to block until internal reset is done + + auctioneerEnabled bool } // New creates a new transaction pool to gather, sort and filter inbound // transactions from the network. -func New(gasTip uint64, chain BlockChain, subpools []SubPool) (*TxPool, error) { +func New(gasTip uint64, chain BlockChain, subpools []SubPool, auctioneerEnabled bool) (*TxPool, error) { // Retrieve the current head so that all subpools and this main coordinator // pool will have the same starting state, even if the chain moves forward // during initialization. 
head := chain.CurrentBlock() pool := &TxPool{ - subpools: subpools, - reservations: make(map[common.Address]SubPool), - quit: make(chan chan error), - term: make(chan struct{}), - sync: make(chan chan error), + subpools: subpools, + reservations: make(map[common.Address]SubPool), + quit: make(chan chan error), + term: make(chan struct{}), + sync: make(chan chan error), + auctioneerEnabled: auctioneerEnabled, } for i, subpool := range subpools { if err := subpool.Init(gasTip, head, pool.reserver(i, subpool)); err != nil { @@ -192,6 +195,12 @@ func (p *TxPool) loop(head *types.Header, chain BlockChain) { ) defer newOptimisticHeadSub.Unsubscribe() + var ( + newHeadCh = make(chan core.ChainHeadEvent) + newHeadSub = chain.SubscribeChainHeadEvent(newHeadCh) + ) + defer newHeadSub.Unsubscribe() + // Track the previous and current head to feed to an idle reset var ( oldHead = head @@ -245,8 +254,15 @@ func (p *TxPool) loop(head *types.Header, chain BlockChain) { // Wait for the next chain head event or a previous reset finish select { case event := <-newOptimisticHeadCh: - // Chain moved forward, store the head for later consumption - newHead = event.Block.Header() + if p.auctioneerEnabled { + // Chain moved forward, store the head for later consumption + newHead = event.Block.Header() + } + case event := <-newHeadCh: + if !p.auctioneerEnabled { + // Chain moved forward, store the head for later consumption + newHead = event.Block.Header() + } case head := <-resetDone: // Previous reset finished, update the old head and allow a new reset diff --git a/eth/api_backend.go b/eth/api_backend.go index 304904365..2b5c820c4 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -143,6 +143,13 @@ func (b *EthAPIBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumbe } return b.eth.blockchain.GetBlock(header.Hash(), header.Number.Uint64()), nil } + if number == rpc.OptimisticBlockNumber { + header := b.eth.blockchain.CurrentOptimisticBlock() + if header == nil { + 
return nil, errors.New("optimistic block not found") + } + return b.eth.blockchain.GetBlock(header.Hash(), header.Number.Uint64()), nil + } return b.eth.blockchain.GetBlockByNumber(uint64(number)), nil } diff --git a/eth/backend.go b/eth/backend.go index bea001c68..b24c4bf46 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -235,9 +235,9 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { if config.TxPool.Journal != "" { config.TxPool.Journal = stack.ResolvePath(config.TxPool.Journal) } - legacyPool := legacypool.New(config.TxPool, eth.blockchain) + legacyPool := legacypool.New(config.TxPool, eth.blockchain, stack.AuctioneerEnabled()) - eth.txPool, err = txpool.New(config.TxPool.PriceLimit, eth.blockchain, []txpool.SubPool{legacyPool, blobPool}) + eth.txPool, err = txpool.New(config.TxPool.PriceLimit, eth.blockchain, []txpool.SubPool{legacyPool, blobPool}, stack.AuctioneerEnabled()) if err != nil { return nil, err } diff --git a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go index 934dadc9a..84cac22f8 100644 --- a/eth/protocols/eth/handler_test.go +++ b/eth/protocols/eth/handler_test.go @@ -116,8 +116,8 @@ func newTestBackendWithGenerator(blocks int, shanghai bool, generator func(int, txconfig := legacypool.DefaultConfig txconfig.Journal = "" // Don't litter the disk with test journals - pool := legacypool.New(txconfig, chain) - txpool, _ := txpool.New(txconfig.PriceLimit, chain, []txpool.SubPool{pool}) + pool := legacypool.New(txconfig, chain, true) + txpool, _ := txpool.New(txconfig.PriceLimit, chain, []txpool.SubPool{pool}, true) return &testBackend{ db: db, diff --git a/genesis.json b/genesis.json index d4ed69eac..e65d980fb 100644 --- a/genesis.json +++ b/genesis.json @@ -40,6 +40,9 @@ } } ], + "astriaAuctioneerAddresses": { + "1": "" + }, "astriaFeeCollectors": { "1": "0xaC21B97d35Bf75A7dAb16f35b111a50e78A72F30" }, diff --git a/go.mod b/go.mod index 1055cd47c..55ab2532b 100644 --- a/go.mod +++ b/go.mod @@ -3,10 
+3,10 @@ module github.com/ethereum/go-ethereum go 1.21 require ( - buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-20241017141511-7e4bcc0ebba5.1 - buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.35.1-20241017141511-7e4bcc0ebba5.1 - buf.build/gen/go/astria/primitives/protocolbuffers/go v1.35.1-20240911152449-eeebd3decdce.1 - buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.35.1-20241017141511-71aab1871615.1 + buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-42cbdd5aad4c.2 + buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.36.2-00000000000000-42cbdd5aad4c.1 + buf.build/gen/go/astria/primitives/protocolbuffers/go v1.36.2-00000000000000-9a039a6ed8db.1 + buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.36.2-00000000000000-e54e1c9ad405.1 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 github.com/Microsoft/go-winio v0.6.1 github.com/VictoriaMetrics/fastcache v1.12.1 @@ -15,7 +15,7 @@ require ( github.com/aws/aws-sdk-go-v2/credentials v1.13.43 github.com/aws/aws-sdk-go-v2/service/route53 v1.30.2 github.com/btcsuite/btcd/btcec/v2 v2.2.0 - github.com/btcsuite/btcd/btcutil v1.1.5 + github.com/btcsuite/btcd/btcutil v1.1.6 github.com/cespare/cp v0.1.0 github.com/cloudflare/cloudflare-go v0.79.0 github.com/cockroachdb/pebble v1.1.0 @@ -79,12 +79,13 @@ require ( golang.org/x/time v0.5.0 golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d google.golang.org/grpc v1.64.1 - google.golang.org/protobuf v1.35.1 + google.golang.org/protobuf v1.36.2 gopkg.in/natefinch/lumberjack.v2 v2.0.0 gopkg.in/yaml.v3 v3.0.1 ) require ( + filippo.io/edwards25519 v1.0.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect github.com/DataDog/zstd v1.4.5 // indirect @@ -100,6 +101,8 @@ require ( github.com/aws/smithy-go v1.15.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bits-and-blooms/bitset 
v1.10.0 // indirect + github.com/btcsuite/btcd v0.24.2 // indirect + github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cockroachdb/errors v1.11.1 // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect @@ -120,6 +123,7 @@ require ( github.com/google/pprof v0.0.0-20230207041349-798e818bf904 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-retryablehttp v0.7.4 // indirect + github.com/hdevalence/ed25519consensus v0.2.0 // indirect github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/klauspost/compress v1.15.15 // indirect diff --git a/go.sum b/go.sum index 83a47af38..992acbb76 100644 --- a/go.sum +++ b/go.sum @@ -1,11 +1,41 @@ +buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-1f40f333891d.2 h1:9rMXnvPR2EX56tMIqbhOK+DvqKjWb++p5s1/bookIl8= +buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-1f40f333891d.2/go.mod h1:hdCXwnxpMeoqXK5LCQ6gLMcmMLUDX8T9+hbxYrtj+wQ= +buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-42cbdd5aad4c.2 h1:W0lzc0sAzlzyKWWXLcuGW+GDsB9VRT+P/4ffP/hwJ4U= +buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-42cbdd5aad4c.2/go.mod h1:jXiXYlSxLrhrUCAIuLq4cVcfXydbsz9mRVftWx/8eGs= +buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-cc31a327d543.1 h1:wOry49zAbse0G4mt2tFTwa4P2AUMuYCR/0mYcPrpcbs= +buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-cc31a327d543.1/go.mod h1:+pVCkEpJNp2JtooS8NiydT7bO9+hu11XUZ5Z47DPtXo= +buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-e09c7fd3fe26.1 h1:gS4erruX5XeMN0MZ7xe4JmEIR3uCWrvzG5HGV725WiI= +buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-e09c7fd3fe26.1/go.mod h1:oXNLXPUVa006hXUuEk+z5isisNlEbrm0yS+XJeMj6u4= 
buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-20241017141511-7e4bcc0ebba5.1 h1:v7QnrDjNmG7I/0aqZdtlP3cBPQGd62w4AYVF8TfAcHM= buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-20241017141511-7e4bcc0ebba5.1/go.mod h1:T5EsLvEE5UMk62gVSwNY/7XlxknAP3sL8tYRsU68b4s= +buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.35.1-00000000000000-cc31a327d543.1 h1:VkPk2LvyNK8NF9WmAnodrwgQZ3JiYAHFEmPKXUtlX4E= +buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.35.1-00000000000000-cc31a327d543.1/go.mod h1:xzRLiRun3wTzhd+oBg9VkXi/c4PhjBjj73+2vSMH5eM= buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.35.1-20241017141511-7e4bcc0ebba5.1 h1:3G2O21DuY5Y/G32tP1mAI16AxwDYTscG2YaOb/WQty0= buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.35.1-20241017141511-7e4bcc0ebba5.1/go.mod h1:U4LUlabiYNYBd1pqYS9o8SsHjBRoEBysrfRVnebzJH0= +buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.35.2-00000000000000-e09c7fd3fe26.1 h1:Twi169wrd7ssCnK27Bymlytv5LmvwFV0zhKhJ64nCYM= +buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.35.2-00000000000000-e09c7fd3fe26.1/go.mod h1:PWzMbPHJ+Y31iNFrtSc5vy/wvm2805ZXyDZndzzFLa0= +buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.36.1-00000000000000-1f40f333891d.1 h1:CSMft5/33d/88j3ziC4zid4DOP7X1Xv71I6pW3BUOvA= +buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.36.1-00000000000000-1f40f333891d.1/go.mod h1:7azHjtjY3sk38xuZGlf2X6DpAPgQMoeZZMix+JkqsdU= +buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.36.2-00000000000000-1f40f333891d.1 h1:cRvRFDg3/KPgEB2+8/orNwCWBhZO0wVZKij4TTKBj9w= +buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.36.2-00000000000000-1f40f333891d.1/go.mod h1:oB3M+Fq9RgyUWGMqYk2FqRobQpdH1yZQZ9TYOoc4yIw= +buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.36.2-00000000000000-42cbdd5aad4c.1 h1:GnqNuwC6UjXvtjGscDekiO+/lstY7NWOILlsOMGNpC4= +buf.build/gen/go/astria/execution-apis/protocolbuffers/go 
v1.36.2-00000000000000-42cbdd5aad4c.1/go.mod h1:oB3M+Fq9RgyUWGMqYk2FqRobQpdH1yZQZ9TYOoc4yIw= buf.build/gen/go/astria/primitives/protocolbuffers/go v1.35.1-20240911152449-eeebd3decdce.1 h1:kG4riHqlF9X6iZ1Oxs5/6ul6aue7MS+A6DK6HAchuTk= buf.build/gen/go/astria/primitives/protocolbuffers/go v1.35.1-20240911152449-eeebd3decdce.1/go.mod h1:n9L7X3VAj4od4VHf2ScJuHARUUQTSxJqtRHZk/7Ptt0= +buf.build/gen/go/astria/primitives/protocolbuffers/go v1.35.2-00000000000000-2f2e9ce53f59.1 h1:C1bT0G1In6Z6tBERd1XqwDjdxTK+PatSOJYlVk5Is60= +buf.build/gen/go/astria/primitives/protocolbuffers/go v1.35.2-00000000000000-2f2e9ce53f59.1/go.mod h1:I9FcB1oNqT1nI+ny0GD8gF9YrIYrHmczgNu6MTE9fAo= +buf.build/gen/go/astria/primitives/protocolbuffers/go v1.36.1-00000000000000-9a039a6ed8db.1 h1:v+RKpd5zE6rqOMA44OLRpDLPYlakjmddvmFFrKxzb48= +buf.build/gen/go/astria/primitives/protocolbuffers/go v1.36.1-00000000000000-9a039a6ed8db.1/go.mod h1:HnX2FkSKZuD3zPFBR+Q17WzloqvIbFd0pYE++or/x2Q= +buf.build/gen/go/astria/primitives/protocolbuffers/go v1.36.2-00000000000000-9a039a6ed8db.1 h1:inT/lOAbHunpGP9YLqtAQNssrxEIgH/OmxXNwbXjUqs= +buf.build/gen/go/astria/primitives/protocolbuffers/go v1.36.2-00000000000000-9a039a6ed8db.1/go.mod h1:Lk1TBSGhOGvbtj0lb7eTeq+Z4N86/67Ay+WWxbqhh6s= buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.35.1-20241017141511-71aab1871615.1 h1:hPMoxTiT7jJjnIbWqneBbL05VeVOTD9UeC/qdvzHL8g= buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.35.1-20241017141511-71aab1871615.1/go.mod h1:2uasRFMH+a3DaF34c1o+w7/YtYnoknmARyYpb9W2QIc= +buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.35.2-00000000000000-0eda7df0ee38.1 h1:uJm/22xugluY5AL2NkIDbNEFBxzN6UcI8vts/bGEDBs= +buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.35.2-00000000000000-0eda7df0ee38.1/go.mod h1:1Z9P18WNTOT+KvLlc0+2FkcBJ7l5eRUUFcnOxHmLeRA= +buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.36.1-00000000000000-e54e1c9ad405.1 
h1:querphz/TCGphT0qGG4DJo6p8qAsfL5/8SEBgfemVhk= +buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.36.1-00000000000000-e54e1c9ad405.1/go.mod h1:D6ou7OxkQXmiZDDNNrT147dA9wC9rhJPchCIfVbw9wM= +buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.36.2-00000000000000-e54e1c9ad405.1 h1:n2embOKwJS+YIyjHRDvOAo7c/kuv3fw9U+gQ/g2Yis8= +buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.36.2-00000000000000-e54e1c9ad405.1/go.mod h1:dHPKfn7RW6FSo7EkD0LqPhZUmRm5NXMB+tWvTrTnZTQ= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -39,6 +69,8 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +filippo.io/edwards25519 v1.0.0 h1:0wAIcmJUqRdI8IJ/3eGi5/HwXZWPujYXXlkrQogz0Ek= +filippo.io/edwards25519 v1.0.0/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 h1:8q4SaHjFsClSvuVne0ID/5Ka8u3fcIHyqkLjcFpNRHQ= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg= @@ -107,6 +139,8 @@ github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13P github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M= github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd h1:js1gPwhcFflTZ7Nzl7WHaOTlTr5hIrR4n1NM4v9n4Kw= github.com/btcsuite/btcd 
v0.23.5-0.20231215221805-96c9fd8078fd/go.mod h1:nm3Bko6zh6bWP60UxwoT5LzdGJsQJaPo6HjduXq9p6A= +github.com/btcsuite/btcd v0.24.2 h1:aLmxPguqxza+4ag8R1I2nnJjSu2iFn/kqtHTIImswcY= +github.com/btcsuite/btcd v0.24.2/go.mod h1:5C8ChTkl5ejr3WHj8tkQSCmydiMEPB0ZhQhehpq7Dgg= github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA= github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k= @@ -115,6 +149,8 @@ github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9Ur github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE= github.com/btcsuite/btcd/btcutil v1.1.5 h1:+wER79R5670vs/ZusMTF1yTcRYE5GUsFbdjdisflzM8= github.com/btcsuite/btcd/btcutil v1.1.5/go.mod h1:PSZZ4UitpLBWzxGd5VGOrLnmOjtPP/a6HaFo12zMs00= +github.com/btcsuite/btcd/btcutil v1.1.6 h1:zFL2+c3Lb9gEgqKNzowKUPQNb8jV7v5Oaodi/AYFd6c= +github.com/btcsuite/btcd/btcutil v1.1.6/go.mod h1:9dFymx8HpuLqBnsPELrImQeTQfKBQqzqGbbV3jK55aE= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ= @@ -339,6 +375,8 @@ github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZn github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU= +github.com/hdevalence/ed25519consensus v0.2.0/go.mod 
h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 h1:X4egAf/gcS1zATw6wn4Ej8vjuVGxeHdan+bRb2ebyv4= github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= @@ -535,11 +573,15 @@ github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobt github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= @@ -897,6 +939,12 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= 
google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= +google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= +google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.2 h1:R8FeyR1/eLmkutZOM5CWghmo5itiG9z0ktFlTVLuTmU= +google.golang.org/protobuf v1.36.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/grpc/execution/server.go b/grpc/execution/server.go index 8f65765d6..1bb17cb1e 100644 --- a/grpc/execution/server.go +++ b/grpc/execution/server.go @@ -100,7 +100,7 @@ func (s *ExecutionServiceServerV1) GetBlock(ctx context.Context, req *astriaPb.G res, err := s.getBlockFromIdentifier(req.GetIdentifier()) if err != nil { log.Error("failed finding block", err) - return nil, err + return nil, shared.WrapError(err, "failed finding block") } log.Debug("GetBlock completed", "request", req, "response", res) @@ -125,7 +125,7 @@ func (s *ExecutionServiceServerV1) BatchGetBlocks(ctx context.Context, req *astr block, err := s.getBlockFromIdentifier(id) if err != nil { log.Error("failed finding block with id", id, "error", err) - return nil, err + return nil, shared.WrapError(err, fmt.Sprintf("failed finding block with id %s", id.String())) } blocks = append(blocks, block) @@ -170,15 +170,9 @@ func (s *ExecutionServiceServerV1) ExecuteBlock(ctx context.Context, req *astria // 
the height that this block will be at height := s.Bc().CurrentBlock().Number.Uint64() + 1 - txsToProcess := types.Transactions{} - for _, tx := range req.Transactions { - unmarshalledTx, err := shared.ValidateAndUnmarshalSequencerTx(height, tx, s.BridgeAddresses(), s.BridgeAllowedAssets()) - if err != nil { - log.Debug("failed to validate sequencer tx, ignoring", "tx", tx, "err", err) - continue - } - txsToProcess = append(txsToProcess, unmarshalledTx) - } + addressPrefix := s.Bc().Config().AstriaSequencerAddressPrefix + + txsToProcess := shared.UnbundleRollupDataTransactions(req.Transactions, height, s.BridgeAddresses(), s.BridgeAllowedAssets(), prevHeadHash.Bytes(), s.AuctioneerAddress(), addressPrefix) // This set of ordered TXs on the TxPool is has been configured to be used by // the Miner when building a payload. @@ -196,7 +190,7 @@ func (s *ExecutionServiceServerV1) ExecuteBlock(ctx context.Context, req *astria payload, err := s.Eth().Miner().BuildPayload(payloadAttributes) if err != nil { log.Error("failed to build payload", "err", err) - return nil, status.Error(codes.InvalidArgument, "Could not build block with provided txs") + return nil, status.Errorf(codes.InvalidArgument, shared.WrapError(err, "Could not build block with provided txs").Error()) } // call blockchain.InsertChain to actually execute and write the blocks to @@ -204,12 +198,12 @@ func (s *ExecutionServiceServerV1) ExecuteBlock(ctx context.Context, req *astria block, err := engine.ExecutableDataToBlock(*payload.Resolve().ExecutionPayload, nil, nil) if err != nil { log.Error("failed to convert executable data to block", err) - return nil, status.Error(codes.Internal, "failed to execute block") + return nil, status.Error(codes.Internal, shared.WrapError(err, "failed to convert executable data to block").Error()) } err = s.Bc().InsertBlockWithoutSetHead(block) if err != nil { log.Error("failed to insert block to chain", "hash", block.Hash(), "prevHash", req.PrevBlockHash, "err", err) - return 
nil, status.Error(codes.Internal, "failed to insert block to chain") + return nil, status.Error(codes.Internal, shared.WrapError(err, "failed to insert block to chain").Error()) } // remove txs from original mempool @@ -228,6 +222,14 @@ func (s *ExecutionServiceServerV1) ExecuteBlock(ctx context.Context, req *astria s.SetNextFeeRecipient(next) } + if address, ok := s.Bc().Config().AstriaAuctioneerAddresses[res.Number+1]; ok { + if err := shared.ValidateBech32mAddress(address, addressPrefix); err != nil { + log.Error("auctioneer address is not a valid bech32 address", "block", res.Number+1, "address", address) + } + + s.SetAuctioneerAddress(address) + } + log.Info("ExecuteBlock completed", "block_num", res.Number, "timestamp", res.Timestamp) totalExecutedTxCount.Inc(int64(len(block.Transactions()))) executeBlockSuccessCount.Inc(1) @@ -242,12 +244,12 @@ func (s *ExecutionServiceServerV1) GetCommitmentState(ctx context.Context, req * softBlock, err := ethHeaderToExecutionBlock(s.Bc().CurrentSafeBlock()) if err != nil { log.Error("error finding safe block", err) - return nil, status.Error(codes.Internal, "could not locate soft block") + return nil, status.Error(codes.Internal, shared.WrapError(err, "could not locate soft block").Error()) } firmBlock, err := ethHeaderToExecutionBlock(s.Bc().CurrentFinalBlock()) if err != nil { log.Error("error finding final block", err) - return nil, status.Error(codes.Internal, "could not locate firm block") + return nil, status.Error(codes.Internal, shared.WrapError(err, "could not locate firm block").Error()) } celestiaBlock := s.Bc().CurrentBaseCelestiaHeight() @@ -310,7 +312,7 @@ func (s *ExecutionServiceServerV1) UpdateCommitmentState(ctx context.Context, re if currentHead != softEthHash { if _, err := s.Bc().SetCanonical(softBlock); err != nil { log.Error("failed updating canonical chain to soft block", err) - return nil, status.Error(codes.Internal, "Could not update head to safe hash") + return nil, status.Error(codes.Internal, 
shared.WrapError(err, "Could not update head to safe hash").Error()) } } @@ -366,7 +368,7 @@ func (s *ExecutionServiceServerV1) getBlockFromIdentifier(identifier *astriaPb.B res, err := ethHeaderToExecutionBlock(header) if err != nil { // This should never happen since we validate header exists above. - return nil, status.Error(codes.Internal, "internal error") + return nil, status.Error(codes.Internal, shared.WrapError(err, "internal error").Error()) } return res, nil @@ -438,3 +440,11 @@ func (s *ExecutionServiceServerV1) BridgeAllowedAssets() map[string]struct{} { func (s *ExecutionServiceServerV1) SyncMethodsCalled() bool { return s.sharedServiceContainer.SyncMethodsCalled() } + +func (s *ExecutionServiceServerV1) AuctioneerAddress() string { + return s.sharedServiceContainer.AuctioneerAddress() +} + +func (s *ExecutionServiceServerV1) SetAuctioneerAddress(auctioneerAddress string) { + s.sharedServiceContainer.SetAuctioneerAddress(auctioneerAddress) +} diff --git a/grpc/execution/server_test.go b/grpc/execution/server_test.go index fc5128229..211be906b 100644 --- a/grpc/execution/server_test.go +++ b/grpc/execution/server_test.go @@ -21,11 +21,11 @@ import ( "testing" ) -func TestExecutionService_GetGenesisInfo(t *testing.T) { - ethservice, sharedServiceContainer := shared.SetupSharedService(t, 10) - serviceV1Alpha1 := SetupExecutionService(t, sharedServiceContainer) +func TestExecutionServiceV1_GetGenesisInfo(t *testing.T) { + ethservice, sharedServiceContainer, _, _ := shared.SetupSharedService(t, 10) + serviceV1 := SetupExecutionService(t, sharedServiceContainer) - genesisInfo, err := serviceV1Alpha1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{}) + genesisInfo, err := serviceV1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{}) require.Nil(t, err, "GetGenesisInfo failed") hashedRollupId := sha256.Sum256([]byte(ethservice.BlockChain().Config().AstriaRollupName)) @@ -33,14 +33,14 @@ func 
TestExecutionService_GetGenesisInfo(t *testing.T) { require.True(t, bytes.Equal(genesisInfo.RollupId.Inner, hashedRollupId[:]), "RollupId is not correct") require.Equal(t, genesisInfo.GetSequencerGenesisBlockHeight(), ethservice.BlockChain().Config().AstriaSequencerInitialHeight, "SequencerInitialHeight is not correct") require.Equal(t, genesisInfo.GetCelestiaBlockVariance(), ethservice.BlockChain().Config().AstriaCelestiaHeightVariance, "CelestiaHeightVariance is not correct") - require.True(t, serviceV1Alpha1.sharedServiceContainer.GenesisInfoCalled(), "GetGenesisInfo should be called") + require.True(t, serviceV1.sharedServiceContainer.GenesisInfoCalled(), "GetGenesisInfo should be called") } -func TestExecutionServiceServerV1Alpha2_GetCommitmentState(t *testing.T) { - ethservice, sharedServiceContainer := shared.SetupSharedService(t, 10) - serviceV1Alpha1 := SetupExecutionService(t, sharedServiceContainer) +func TestExecutionServiceServerV1_GetCommitmentState(t *testing.T) { + ethservice, sharedServiceContainer, _, _ := shared.SetupSharedService(t, 10) + serviceV1 := SetupExecutionService(t, sharedServiceContainer) - commitmentState, err := serviceV1Alpha1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) + commitmentState, err := serviceV1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) require.Nil(t, err, "GetCommitmentState failed") require.NotNil(t, commitmentState, "CommitmentState is nil") @@ -60,12 +60,12 @@ func TestExecutionServiceServerV1Alpha2_GetCommitmentState(t *testing.T) { require.Equal(t, uint64(commitmentState.Firm.Number), firmBlock.Number.Uint64(), "Firm Block Number do not match") require.Equal(t, commitmentState.BaseCelestiaHeight, ethservice.BlockChain().Config().AstriaCelestiaInitialHeight, "BaseCelestiaHeight is not correct") - require.True(t, serviceV1Alpha1.sharedServiceContainer.CommitmentStateCalled(), "GetCommitmentState should be called") + require.True(t, 
serviceV1.sharedServiceContainer.CommitmentStateCalled(), "GetCommitmentState should be called") } -func TestExecutionService_GetBlock(t *testing.T) { - ethservice, sharedServiceContainer := shared.SetupSharedService(t, 10) - serviceV1Alpha1 := SetupExecutionService(t, sharedServiceContainer) +func TestExecutionServiceV1_GetBlock(t *testing.T) { + ethservice, sharedServiceContainer, _, _ := shared.SetupSharedService(t, 10) + serviceV1 := SetupExecutionService(t, sharedServiceContainer) tests := []struct { description string @@ -97,7 +97,7 @@ func TestExecutionService_GetBlock(t *testing.T) { for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { - blockInfo, err := serviceV1Alpha1.GetBlock(context.Background(), tt.getBlockRequst) + blockInfo, err := serviceV1.GetBlock(context.Background(), tt.getBlockRequst) if tt.expectedReturnCode > 0 { require.NotNil(t, err, "GetBlock should return an error") require.Equal(t, tt.expectedReturnCode, status.Code(err), "GetBlock failed") @@ -123,9 +123,9 @@ func TestExecutionService_GetBlock(t *testing.T) { } } -func TestExecutionServiceServerV1Alpha2_BatchGetBlocks(t *testing.T) { - ethservice, sharedServiceContainer := shared.SetupSharedService(t, 10) - serviceV1Alpha1 := SetupExecutionService(t, sharedServiceContainer) +func TestExecutionServiceServerV1_BatchGetBlocks(t *testing.T) { + ethservice, sharedServiceContainer, _, _ := shared.SetupSharedService(t, 10) + serviceV1 := SetupExecutionService(t, sharedServiceContainer) tests := []struct { description string @@ -175,7 +175,7 @@ func TestExecutionServiceServerV1Alpha2_BatchGetBlocks(t *testing.T) { for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { - batchBlocksRes, err := serviceV1Alpha1.BatchGetBlocks(context.Background(), tt.batchGetBlockRequest) + batchBlocksRes, err := serviceV1.BatchGetBlocks(context.Background(), tt.batchGetBlockRequest) if tt.expectedReturnCode > 0 { require.NotNil(t, err, "BatchGetBlocks should return an error") 
require.Equal(t, tt.expectedReturnCode, status.Code(err), "BatchGetBlocks failed") @@ -195,8 +195,8 @@ func TestExecutionServiceServerV1Alpha2_BatchGetBlocks(t *testing.T) { } } -func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) { - ethservice, _ := shared.SetupSharedService(t, 10) +func TestExecutionServiceServerV1_ExecuteBlock(t *testing.T) { + ethservice, _, _, _ := shared.SetupSharedService(t, 10) tests := []struct { description string @@ -248,19 +248,19 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) { for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { // reset the blockchain with each test - ethservice, sharedServiceContainer := shared.SetupSharedService(t, 10) - serviceV1Alpha1 := SetupExecutionService(t, sharedServiceContainer) + ethservice, sharedServiceContainer, _, _ := shared.SetupSharedService(t, 10) + serviceV1 := SetupExecutionService(t, sharedServiceContainer) var err error // adding this to prevent shadowing of genesisInfo in the below if branch var genesisInfo *astriaPb.GenesisInfo var commitmentStateBeforeExecuteBlock *astriaPb.CommitmentState if tt.callGenesisInfoAndGetCommitmentState { // call getGenesisInfo and getCommitmentState before calling executeBlock - genesisInfo, err = serviceV1Alpha1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{}) + genesisInfo, err = serviceV1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{}) require.Nil(t, err, "GetGenesisInfo failed") require.NotNil(t, genesisInfo, "GenesisInfo is nil") - commitmentStateBeforeExecuteBlock, err = serviceV1Alpha1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) + commitmentStateBeforeExecuteBlock, err = serviceV1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) require.Nil(t, err, "GetCommitmentState failed") require.NotNil(t, commitmentStateBeforeExecuteBlock, "CommitmentState is nil") } @@ -319,7 +319,7 @@ func 
TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) { Transactions: marshalledTxs, } - executeBlockRes, err := serviceV1Alpha1.ExecuteBlock(context.Background(), executeBlockReq) + executeBlockRes, err := serviceV1.ExecuteBlock(context.Background(), executeBlockReq) if tt.expectedReturnCode > 0 { require.NotNil(t, err, "ExecuteBlock should return an error") require.Equal(t, tt.expectedReturnCode, status.Code(err), "ExecuteBlock failed") @@ -331,7 +331,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) { require.Equal(t, 0, astriaOrdered.Len(), "AstriaOrdered should be empty") // check if commitment state is not updated - commitmentStateAfterExecuteBlock, err := serviceV1Alpha1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) + commitmentStateAfterExecuteBlock, err := serviceV1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) require.Nil(t, err, "GetCommitmentState failed") require.Exactly(t, commitmentStateBeforeExecuteBlock, commitmentStateAfterExecuteBlock, "Commitment state should not be updated") @@ -341,17 +341,17 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) { } } -func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitment(t *testing.T) { - ethservice, sharedServiceContainer := shared.SetupSharedService(t, 10) - serviceV1Alpha1 := SetupExecutionService(t, sharedServiceContainer) +func TestExecutionServiceServerV1_ExecuteBlockAndUpdateCommitment(t *testing.T) { + ethservice, sharedServiceContainer, _, _ := shared.SetupSharedService(t, 10) + serviceV1 := SetupExecutionService(t, sharedServiceContainer) // call genesis info - genesisInfo, err := serviceV1Alpha1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{}) + genesisInfo, err := serviceV1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{}) require.Nil(t, err, "GetGenesisInfo failed") require.NotNil(t, genesisInfo, "GenesisInfo is nil") // 
call get commitment state - commitmentState, err := serviceV1Alpha1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) + commitmentState, err := serviceV1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) require.Nil(t, err, "GetCommitmentState failed") require.NotNil(t, commitmentState, "CommitmentState is nil") @@ -416,7 +416,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitment(t *testi Transactions: marshalledTxs, } - executeBlockRes, err := serviceV1Alpha1.ExecuteBlock(context.Background(), executeBlockReq) + executeBlockRes, err := serviceV1.ExecuteBlock(context.Background(), executeBlockReq) require.Nil(t, err, "ExecuteBlock failed") require.NotNil(t, executeBlockRes, "ExecuteBlock response is nil") @@ -444,7 +444,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitment(t *testi }, } - updateCommitmentStateRes, err := serviceV1Alpha1.UpdateCommitmentState(context.Background(), updateCommitmentStateReq) + updateCommitmentStateRes, err := serviceV1.UpdateCommitmentState(context.Background(), updateCommitmentStateReq) require.Nil(t, err, "UpdateCommitmentState failed") require.NotNil(t, updateCommitmentStateRes, "UpdateCommitmentState response should not be nil") require.Equal(t, updateCommitmentStateRes, updateCommitmentStateReq.CommitmentState, "CommitmentState response should match request") @@ -478,17 +478,17 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitment(t *testi } // Check that invalid transactions are not added into a block and are removed from the mempool -func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitmentWithInvalidTransactions(t *testing.T) { - ethservice, sharedServiceContainer := shared.SetupSharedService(t, 10) - serviceV1Alpha1 := SetupExecutionService(t, sharedServiceContainer) +func TestExecutionServiceServerV1_ExecuteBlockAndUpdateCommitmentWithInvalidTransactions(t *testing.T) { + ethservice, 
sharedServiceContainer, _, _ := shared.SetupSharedService(t, 10) + serviceV1 := SetupExecutionService(t, sharedServiceContainer) // call genesis info - genesisInfo, err := serviceV1Alpha1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{}) + genesisInfo, err := serviceV1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{}) require.Nil(t, err, "GetGenesisInfo failed") require.NotNil(t, genesisInfo, "GenesisInfo is nil") // call get commitment state - commitmentState, err := serviceV1Alpha1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) + commitmentState, err := serviceV1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) require.Nil(t, err, "GetCommitmentState failed") require.NotNil(t, commitmentState, "CommitmentState is nil") @@ -541,7 +541,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitmentWithInval Transactions: marshalledTxs, } - executeBlockRes, err := serviceV1Alpha1.ExecuteBlock(context.Background(), executeBlockReq) + executeBlockRes, err := serviceV1.ExecuteBlock(context.Background(), executeBlockReq) require.Nil(t, err, "ExecuteBlock failed") require.NotNil(t, executeBlockRes, "ExecuteBlock response is nil") @@ -569,7 +569,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitmentWithInval }, } - updateCommitmentStateRes, err := serviceV1Alpha1.UpdateCommitmentState(context.Background(), updateCommitmentStateReq) + updateCommitmentStateRes, err := serviceV1.UpdateCommitmentState(context.Background(), updateCommitmentStateReq) require.Nil(t, err, "UpdateCommitmentState failed") require.NotNil(t, updateCommitmentStateRes, "UpdateCommitmentState response should not be nil") require.Equal(t, updateCommitmentStateRes, updateCommitmentStateReq.CommitmentState, "CommitmentState response should match request") diff --git a/grpc/optimistic/server.go b/grpc/optimistic/server.go index d96e6a876..52cf8116c 100644 --- 
a/grpc/optimistic/server.go +++ b/grpc/optimistic/server.go @@ -1,17 +1,15 @@ package optimistic import ( - optimisticGrpc "buf.build/gen/go/astria/execution-apis/grpc/go/astria/bundle/v1alpha1/bundlev1alpha1grpc" - optimsticPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/bundle/v1alpha1" + optimisticGrpc "buf.build/gen/go/astria/execution-apis/grpc/go/astria/auction/v1alpha1/auctionv1alpha1grpc" + optimsticPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/auction/v1alpha1" astriaPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/execution/v1" "context" "errors" - "fmt" "github.com/ethereum/go-ethereum/beacon/engine" "github.com/ethereum/go-ethereum/common" cmath "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/grpc/shared" "github.com/ethereum/go-ethereum/log" @@ -30,7 +28,7 @@ import ( type OptimisticServiceV1Alpha1 struct { optimisticGrpc.UnimplementedOptimisticExecutionServiceServer - optimisticGrpc.UnimplementedBundleServiceServer + optimisticGrpc.UnimplementedAuctionServiceServer sharedServiceContainer *shared.SharedServiceContainer @@ -40,6 +38,8 @@ type OptimisticServiceV1Alpha1 struct { var ( executeOptimisticBlockRequestCount = metrics.GetOrRegisterCounter("astria/optimistic/execute_optimistic_block_requests", nil) executeOptimisticBlockSuccessCount = metrics.GetOrRegisterCounter("astria/optimistic/execute_optimistic_block_success", nil) + optimisticBlockHeight = metrics.GetOrRegisterGauge("astria/execution/optimistic_block_height", nil) + txsStreamedCount = metrics.GetOrRegisterCounter("astria/optimistic/txs_streamed", nil) executionOptimisticBlockTimer = metrics.GetOrRegisterTimer("astria/optimistic/execute_optimistic_block_time", nil) ) @@ -54,7 +54,9 @@ func NewOptimisticServiceV1Alpha(sharedServiceContainer *shared.SharedServiceCon return 
optimisticService } -func (o *OptimisticServiceV1Alpha1) GetBundleStream(_ *optimsticPb.GetBundleStreamRequest, stream optimisticGrpc.BundleService_GetBundleStreamServer) error { +func (o *OptimisticServiceV1Alpha1) GetBidStream(_ *optimsticPb.GetBidStreamRequest, stream optimisticGrpc.AuctionService_GetBidStreamServer) error { + log.Debug("GetBidStream called") + pendingTxEventCh := make(chan core.NewTxsEvent) pendingTxEvent := o.Eth().TxPool().SubscribeTransactions(pendingTxEventCh, false) defer pendingTxEvent.Unsubscribe() @@ -67,7 +69,7 @@ func (o *OptimisticServiceV1Alpha1) GetBundleStream(_ *optimsticPb.GetBundleStre optimisticBlock := o.Eth().BlockChain().CurrentOptimisticBlock() for _, pendingTx := range pendingTxs.Txs { - bundle := optimsticPb.Bundle{} + bid := optimsticPb.Bid{} totalCost := big.NewInt(0) effectiveTip := cmath.BigMin(pendingTx.GasTipCap(), new(big.Int).Sub(pendingTx.GasFeeCap(), optimisticBlock.BaseFee)) @@ -76,32 +78,41 @@ func (o *OptimisticServiceV1Alpha1) GetBundleStream(_ *optimsticPb.GetBundleStre marshalledTxs := [][]byte{} marshalledTx, err := pendingTx.MarshalBinary() if err != nil { - return status.Errorf(codes.Internal, "error marshalling tx: %v", err) + return status.Errorf(codes.Internal, shared.WrapError(err, "error marshalling tx").Error()) } marshalledTxs = append(marshalledTxs, marshalledTx) - bundle.Fee = totalCost.Uint64() - bundle.Transactions = marshalledTxs - bundle.BaseSequencerBlockHash = *o.currentOptimisticSequencerBlock.Load() - bundle.PrevRollupBlockHash = optimisticBlock.Hash().Bytes() + bid.Fee = totalCost.Uint64() + bid.Transactions = marshalledTxs + bid.SequencerParentBlockHash = *o.currentOptimisticSequencerBlock.Load() + bid.RollupParentBlockHash = optimisticBlock.Hash().Bytes() - err = stream.Send(&optimsticPb.GetBundleStreamResponse{Bundle: &bundle}) + txsStreamedCount.Inc(1) + err = stream.Send(&optimsticPb.GetBidStreamResponse{Bid: &bid}) if err != nil { - return status.Errorf(codes.Internal, "error 
sending bundle over stream: %v", err) + log.Error("error sending bid over stream", "err", err) + return status.Error(codes.Internal, shared.WrapError(err, "error sending bid over stream").Error()) } } case err := <-pendingTxEvent.Err(): - return status.Errorf(codes.Internal, "error waiting for pending transactions: %v", err) + if err != nil { + log.Error("error waiting for pending transactions", "err", err) + return status.Error(codes.Internal, shared.WrapError(err, "error waiting for pending transactions").Error()) + } else { + // TODO - what is the right error code here? + return status.Error(codes.Internal, "tx pool subscription closed") + } case <-stream.Context().Done(): - log.Debug("GetBundleStream stream closed with error", "err", stream.Context().Err()) return stream.Context().Err() } } } func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlockStream(stream optimisticGrpc.OptimisticExecutionService_ExecuteOptimisticBlockStreamServer) error { + log.Debug("ExecuteOptimisticBlockStream called") + mempoolClearingEventCh := make(chan core.NewMempoolCleared) mempoolClearingEvent := o.Eth().TxPool().SubscribeMempoolClearance(mempoolClearingEventCh) defer mempoolClearingEvent.Unsubscribe() @@ -116,12 +127,14 @@ func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlockStream(stream optimist return err } + executeOptimisticBlockRequestCount.Inc(1) + baseBlock := msg.GetBaseBlock() // execute the optimistic block and wait for the mempool clearing event optimisticBlock, err := o.ExecuteOptimisticBlock(stream.Context(), baseBlock) if err != nil { - return status.Error(codes.Internal, "failed to execute optimistic block") + return status.Errorf(codes.Internal, shared.WrapError(err, "failed to execute optimistic block").Error()) } optimisticBlockHash := common.BytesToHash(optimisticBlock.Hash) @@ -132,14 +145,24 @@ func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlockStream(stream optimist return status.Error(codes.Internal, "failed to clear mempool after 
optimistic block execution") } o.currentOptimisticSequencerBlock.Store(&baseBlock.SequencerBlockHash) + executeOptimisticBlockSuccessCount.Inc(1) err = stream.Send(&optimsticPb.ExecuteOptimisticBlockStreamResponse{ Block: optimisticBlock, BaseSequencerBlockHash: baseBlock.SequencerBlockHash, }) case <-time.After(500 * time.Millisecond): + log.Error("timed out waiting for mempool to clear after optimistic block execution") return status.Error(codes.DeadlineExceeded, "timed out waiting for mempool to clear after optimistic block execution") case err := <-mempoolClearingEvent.Err(): - return status.Errorf(codes.Internal, "error waiting for mempool clearing event: %v", err) + if err != nil { + log.Error("error waiting for mempool clearing event", "err", err) + return status.Errorf(codes.Internal, shared.WrapError(err, "error waiting for mempool clearing event").Error()) + } else { + // TODO - what is the right error code here? + return status.Error(codes.Internal, "mempool clearance subscription closed") + } + case <-stream.Context().Done(): + return stream.Context().Err() } } } @@ -147,49 +170,30 @@ func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlockStream(stream optimist func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlock(ctx context.Context, req *optimsticPb.BaseBlock) (*astriaPb.Block, error) { // we need to execute the optimistic block log.Debug("ExecuteOptimisticBlock called", "timestamp", req.Timestamp, "sequencer_block_hash", req.SequencerBlockHash) - executeOptimisticBlockRequestCount.Inc(1) + + // Deliberately called after lock, to more directly measure the time spent executing + executionStart := time.Now() + defer executionOptimisticBlockTimer.UpdateSince(executionStart) if err := validateStaticExecuteOptimisticBlockRequest(req); err != nil { log.Error("ExecuteOptimisticBlock called with invalid BaseBlock", "err", err) - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("BaseBlock is invalid: %s", err.Error())) + return nil, 
status.Error(codes.InvalidArgument, shared.WrapError(err, "invalid BaseBlock").Error()) } if !o.SyncMethodsCalled() { return nil, status.Error(codes.PermissionDenied, "Cannot execute block until GetGenesisInfo && GetCommitmentState methods are called") } - // Deliberately called after lock, to more directly measure the time spent executing - executionStart := time.Now() - defer executionOptimisticBlockTimer.UpdateSince(executionStart) - - o.CommitmentUpdateLock().Lock() - // get the soft block softBlock := o.Bc().CurrentSafeBlock() - o.CommitmentUpdateLock().Unlock() - o.BlockExecutionLock().Lock() nextFeeRecipient := o.NextFeeRecipient() - o.BlockExecutionLock().Unlock() // the height that this block will be at height := o.Bc().CurrentBlock().Number.Uint64() + 1 - txsToProcess := types.Transactions{} - for _, tx := range req.Transactions { - unmarshalledTx, err := shared.ValidateAndUnmarshalSequencerTx(height, tx, o.BridgeAddresses(), o.BridgeAllowedAssets()) - if err != nil { - log.Debug("failed to validate sequencer tx, ignoring", "tx", tx, "err", err) - continue - } - - err = o.Eth().TxPool().ValidateTx(unmarshalledTx) - if err != nil { - log.Debug("failed to validate tx, ignoring", "tx", tx, "err", err) - continue - } + addressPrefix := o.Bc().Config().AstriaSequencerAddressPrefix - txsToProcess = append(txsToProcess, unmarshalledTx) - } + txsToProcess := shared.UnbundleRollupDataTransactions(req.Transactions, height, o.BridgeAddresses(), o.BridgeAllowedAssets(), softBlock.Hash().Bytes(), o.AuctioneerAddress(), addressPrefix) // Build a payload to add to the chain payloadAttributes := &miner.BuildPayloadArgs{ @@ -203,13 +207,13 @@ func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlock(ctx context.Context, payload, err := o.Eth().Miner().BuildPayload(payloadAttributes) if err != nil { log.Error("failed to build payload", "err", err) - return nil, status.Error(codes.InvalidArgument, "Could not build block with provided txs") + return nil, 
status.Errorf(codes.InvalidArgument, shared.WrapError(err, "failed to build payload").Error()) } block, err := engine.ExecutableDataToBlock(*payload.Resolve().ExecutionPayload, nil, nil) if err != nil { log.Error("failed to convert executable data to block", err) - return nil, status.Error(codes.Internal, "failed to execute block") + return nil, status.Error(codes.Internal, shared.WrapError(err, "failed to convert executable data to block").Error()) } // this will insert the optimistic block into the chain and persist it's state without @@ -217,11 +221,13 @@ func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlock(ctx context.Context, err = o.Bc().InsertBlockWithoutSetHead(block) if err != nil { log.Error("failed to insert block to chain", "hash", block.Hash(), "prevHash", block.ParentHash(), "err", err) - return nil, status.Error(codes.Internal, "failed to insert block to chain") + return nil, status.Error(codes.Internal, shared.WrapError(err, "failed to insert block to chain").Error()) } // we store a pointer to the optimistic block in the chain so that we can use it // to retrieve the state of the optimistic block + // this method also sends an event which indicates that a new optimistic block has been set + // the mempool clearing logic is triggered when this event is received o.Bc().SetOptimistic(block) res := &astriaPb.Block{ @@ -233,8 +239,9 @@ func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlock(ctx context.Context, }, } + optimisticBlockHeight.Update(int64(block.NumberU64())) + log.Info("ExecuteOptimisticBlock completed", "block_num", res.Number, "timestamp", res.Timestamp) - executeOptimisticBlockSuccessCount.Inc(1) return res, nil } @@ -290,3 +297,7 @@ func (s *OptimisticServiceV1Alpha1) BridgeAllowedAssets() map[string]struct{} { func (s *OptimisticServiceV1Alpha1) SyncMethodsCalled() bool { return s.sharedServiceContainer.SyncMethodsCalled() } + +func (s *OptimisticServiceV1Alpha1) AuctioneerAddress() string { + return 
s.sharedServiceContainer.AuctioneerAddress() +} diff --git a/grpc/optimistic/server_test.go b/grpc/optimistic/server_test.go index cbcb562fc..538b0433b 100644 --- a/grpc/optimistic/server_test.go +++ b/grpc/optimistic/server_test.go @@ -1,7 +1,7 @@ package optimistic import ( - optimsticPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/bundle/v1alpha1" + optimsticPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/auction/v1alpha1" astriaPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/execution/v1" primitivev1 "buf.build/gen/go/astria/primitives/protocolbuffers/go/astria/primitive/v1" sequencerblockv1 "buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go/astria/sequencerblock/v1" @@ -23,8 +23,8 @@ import ( "time" ) -func TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlock(t *testing.T) { - ethService, _ := shared.SetupSharedService(t, 10) +func TestOptimisticServiceServerV1Alpha1_ExecuteOptimisticBlock(t *testing.T) { + ethService, _, _, _ := shared.SetupSharedService(t, 10) tests := []struct { description string @@ -66,7 +66,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlock(t *testing.T) { for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { - ethservice, sharedService := shared.SetupSharedService(t, 10) + ethservice, sharedService, _, _ := shared.SetupSharedService(t, 10) // reset the blockchain with each test optimisticServiceV1Alpha1 := SetupOptimisticService(t, sharedService) @@ -193,8 +193,8 @@ func TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlock(t *testing.T) { } } -func TestNewExecutionServiceServerV1Alpha2_StreamBundles(t *testing.T) { - ethservice, sharedService := shared.SetupSharedService(t, 10) +func TestNewOptimisticServiceServerV1Alpha_StreamBids(t *testing.T) { + ethservice, sharedService, _, _ := shared.SetupSharedService(t, 10) optimisticServiceV1Alpha1 := SetupOptimisticService(t, sharedService) executionServiceV1 := 
execution.SetupExecutionService(t, sharedService) @@ -286,13 +286,13 @@ func TestNewExecutionServiceServerV1Alpha2_StreamBundles(t *testing.T) { require.Equal(t, pending, 0, "Mempool should have 0 pending txs") require.Equal(t, queued, 0, "Mempool should have 0 queued txs") - mockServerSideStreaming := MockServerSideStreaming[optimsticPb.GetBundleStreamResponse]{ - sentResponses: []*optimsticPb.GetBundleStreamResponse{}, + mockServerSideStreaming := MockServerSideStreaming[optimsticPb.GetBidStreamResponse]{ + sentResponses: []*optimsticPb.GetBidStreamResponse{}, } errorCh = make(chan error) go func() { - errorCh <- optimisticServiceV1Alpha1.GetBundleStream(&optimsticPb.GetBundleStreamRequest{}, &mockServerSideStreaming) + errorCh <- optimisticServiceV1Alpha1.GetBidStream(&optimsticPb.GetBidStreamRequest{}, &mockServerSideStreaming) }() stateDb, err := ethservice.BlockChain().StateAt(currentOptimisticBlock.Root) @@ -334,31 +334,31 @@ func TestNewExecutionServiceServerV1Alpha2_StreamBundles(t *testing.T) { select { case err := <-errorCh: - require.ErrorContains(t, err, "error waiting for pending transactions") + require.ErrorContains(t, err, "tx pool subscription closed") } require.Len(t, mockServerSideStreaming.sentResponses, 5, "Number of responses should match the number of requests") txIndx := 0 for _, resp := range mockServerSideStreaming.sentResponses { - bundle := resp.GetBundle() + bid := resp.GetBid() - require.Len(t, bundle.Transactions, 1, "Bundle should have 1 tx") + require.Len(t, bid.Transactions, 1, "Bid should have 1 tx") - receivedTx := bundle.Transactions[0] + receivedTx := bid.Transactions[0] sentTx := txs[txIndx] marshalledSentTx, err := sentTx.MarshalBinary() require.Nil(t, err, "Failed to marshal tx") require.True(t, bytes.Equal(receivedTx, marshalledSentTx), "Received tx does not match sent tx") txIndx += 1 - require.True(t, bytes.Equal(bundle.PrevRollupBlockHash, currentOptimisticBlock.Hash().Bytes()), "PrevRollupBlockHash should match the 
current optimistic block hash") - require.True(t, bytes.Equal(bundle.BaseSequencerBlockHash, *optimisticServiceV1Alpha1.currentOptimisticSequencerBlock.Load()), "BaseSequencerBlockHash should match the current optimistic sequencer block hash") + require.True(t, bytes.Equal(bid.RollupParentBlockHash, currentOptimisticBlock.Hash().Bytes()), "PrevRollupBlockHash should match the current optimistic block hash") + require.True(t, bytes.Equal(bid.SequencerParentBlockHash, *optimisticServiceV1Alpha1.currentOptimisticSequencerBlock.Load()), "BaseSequencerBlockHash should match the current optimistic sequencer block hash") } } -func TestExecutionServiceServerV1Alpha2_StreamExecuteOptimisticBlock(t *testing.T) { - ethservice, sharedService := shared.SetupSharedService(t, 10) +func TestOptimisticServiceServerV1_StreamExecuteOptimisticBlock(t *testing.T) { + ethservice, sharedService, _, _ := shared.SetupSharedService(t, 10) optimisticServiceV1Alpha1 := SetupOptimisticService(t, sharedService) executionServiceV1 := execution.SetupExecutionService(t, sharedService) diff --git a/grpc/optimistic/validation.go b/grpc/optimistic/validation.go index a59420d73..cbd6c62e6 100644 --- a/grpc/optimistic/validation.go +++ b/grpc/optimistic/validation.go @@ -1,7 +1,7 @@ package optimistic import ( - optimsticPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/bundle/v1alpha1" + optimsticPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/auction/v1alpha1" "fmt" ) diff --git a/grpc/shared/bech32m.go b/grpc/shared/bech32m.go new file mode 100644 index 000000000..f4c5237ac --- /dev/null +++ b/grpc/shared/bech32m.go @@ -0,0 +1,92 @@ +package shared + +// Copied from astria-cli-go bech32m module (https://github.com/astriaorg/astria-cli-go/blob/d5ef82f718325b2907634c108d42b503211c20e6/modules/bech32m/bech32m.go#L1) +// TODO: organize the bech32m usage throughout the codebase + +import ( + "crypto/ed25519" + "crypto/sha256" + "fmt" + + 
"github.com/btcsuite/btcd/btcutil/bech32" +) + +type Address struct { + address string + prefix string + bytes [20]byte +} + +// String returns the bech32m address as a string +func (a *Address) String() string { + return a.address +} + +// Prefix returns the prefix of the bech32m address +func (a *Address) Prefix() string { + return a.prefix +} + +// Bytes returns the underlying bytes for the bech32m address as a [20]byte array +func (a *Address) Bytes() [20]byte { + return a.bytes +} + +// ValidateBech32mAddress verifies that a string in a valid bech32m address. It +// will return nil if the address is valid, otherwise it will return an error. +func ValidateBech32mAddress(address string, intendedPrefix string) error { + prefix, byteAddress, version, err := bech32.DecodeGeneric(address) + if err != nil { + return fmt.Errorf("address must be a bech32 encoded string") + } + if version != bech32.VersionM { + return fmt.Errorf("address must be a bech32m address") + } + byteAddress, err = bech32.ConvertBits(byteAddress, 5, 8, false) + if err != nil { + return fmt.Errorf("failed to convert address to 8 bit") + } + if prefix == "" { + return fmt.Errorf("address must have prefix") + } + if prefix != intendedPrefix { + return fmt.Errorf("address must have prefix %s", intendedPrefix) + } + + if len(byteAddress) != 20 { + return fmt.Errorf("address must decode to a 20 length byte array: got len %d", len(byteAddress)) + } + + return nil +} + +// EncodeFromBytes creates a *Address from a [20]byte array and string +// prefix. 
+func EncodeFromBytes(prefix string, data [20]byte) (string, error) { + // Convert the data from 8-bit groups to 5-bit + convertedBytes, err := bech32.ConvertBits(data[:], 8, 5, true) + if err != nil { + return "", fmt.Errorf("failed to convert bits from 8-bit groups to 5-bit groups: %v", err) + } + + // Encode the data as bech32m + address, err := bech32.EncodeM(prefix, convertedBytes) + if err != nil { + return "", fmt.Errorf("failed to encode address as bech32m: %v", err) + } + + return address, nil +} + +// EncodeFromPublicKey takes an ed25519 public key and string prefix and encodes +// them into a *Address. +func EncodeFromPublicKey(prefix string, pubkey ed25519.PublicKey) (string, error) { + hash := sha256.Sum256(pubkey) + var addr [20]byte + copy(addr[:], hash[:20]) + address, err := EncodeFromBytes(prefix, addr) + if err != nil { + return "", err + } + return address, nil +} diff --git a/grpc/shared/container.go b/grpc/shared/container.go index 04e4568e5..263971c5e 100644 --- a/grpc/shared/container.go +++ b/grpc/shared/container.go @@ -1,14 +1,15 @@ package shared import ( - "errors" "fmt" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" + "github.com/pkg/errors" "sync" + "sync/atomic" ) type SharedServiceContainer struct { @@ -24,8 +25,10 @@ type SharedServiceContainer struct { bridgeAddresses map[string]*params.AstriaBridgeAddressConfig // astria bridge addess to config for that bridge account bridgeAllowedAssets map[string]struct{} // a set of allowed asset IDs structs are left empty - // TODO: bharath - we could make this an atomic pointer??? 
- nextFeeRecipient common.Address // Fee recipient for the next block + // auctioneer address is a bech32m address + auctioneerAddress atomic.Pointer[string] + + nextFeeRecipient atomic.Pointer[common.Address] // Fee recipient for the next block } func NewSharedServiceContainer(eth *eth.Ethereum) (*SharedServiceContainer, error) { @@ -84,11 +87,11 @@ func NewSharedServiceContainer(eth *eth.Ethereum) (*SharedServiceContainer, erro // To decrease compute cost, we identify the next fee recipient at the start // and update it as we execute blocks. nextFeeRecipient := common.Address{} + nextBlock := uint32(bc.CurrentBlock().Number.Int64()) + 1 if bc.Config().AstriaFeeCollectors == nil { log.Warn("fee asset collectors not set, assets will be burned") } else { maxHeightCollectorMatch := uint32(0) - nextBlock := uint32(bc.CurrentBlock().Number.Int64()) + 1 for height, collector := range bc.Config().AstriaFeeCollectors { if height <= nextBlock && height > maxHeightCollectorMatch { maxHeightCollectorMatch = height @@ -96,14 +99,34 @@ func NewSharedServiceContainer(eth *eth.Ethereum) (*SharedServiceContainer, erro } } } + + auctioneerAddressesBlockMap := bc.Config().AstriaAuctioneerAddresses + auctioneerAddress := "" + if auctioneerAddressesBlockMap == nil { + return nil, errors.New("auctioneer addresses not set") + } else { + maxHeightCollectorMatch := uint32(0) + for height, address := range auctioneerAddressesBlockMap { + if height <= nextBlock && height > maxHeightCollectorMatch { + maxHeightCollectorMatch = height + if err := ValidateBech32mAddress(address, bc.Config().AstriaSequencerAddressPrefix); err != nil { + return nil, errors.Wrapf(err, "auctioneer address %s at height %d is invalid", address, height) + } + auctioneerAddress = address + } + } + } + sharedServiceContainer := &SharedServiceContainer{ eth: eth, bc: bc, bridgeAddresses: bridgeAddresses, bridgeAllowedAssets: bridgeAllowedAssets, - nextFeeRecipient: nextFeeRecipient, } + 
sharedServiceContainer.SetAuctioneerAddress(auctioneerAddress) + sharedServiceContainer.SetNextFeeRecipient(nextFeeRecipient) + return sharedServiceContainer, nil } @@ -144,12 +167,12 @@ func (s *SharedServiceContainer) BlockExecutionLock() *sync.Mutex { } func (s *SharedServiceContainer) NextFeeRecipient() common.Address { - return s.nextFeeRecipient + return *s.nextFeeRecipient.Load() } // assumes that the block execution lock is being held func (s *SharedServiceContainer) SetNextFeeRecipient(nextFeeRecipient common.Address) { - s.nextFeeRecipient = nextFeeRecipient + s.nextFeeRecipient.Store(&nextFeeRecipient) } func (s *SharedServiceContainer) BridgeAddresses() map[string]*params.AstriaBridgeAddressConfig { @@ -159,3 +182,11 @@ func (s *SharedServiceContainer) BridgeAddresses() map[string]*params.AstriaBrid func (s *SharedServiceContainer) BridgeAllowedAssets() map[string]struct{} { return s.bridgeAllowedAssets } + +func (s *SharedServiceContainer) AuctioneerAddress() string { + return *s.auctioneerAddress.Load() +} + +func (s *SharedServiceContainer) SetAuctioneerAddress(newAddress string) { + s.auctioneerAddress.Store(&newAddress) +} diff --git a/grpc/shared/test_setup.go b/grpc/shared/test_setup.go index 5fb0aec21..45078ea15 100644 --- a/grpc/shared/test_setup.go +++ b/grpc/shared/test_setup.go @@ -1,15 +1,16 @@ package shared import ( + "crypto/ed25519" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/eth" "github.com/stretchr/testify/require" "testing" ) -func SetupSharedService(t *testing.T, noOfBlocksToGenerate int) (*eth.Ethereum, *SharedServiceContainer) { +func SetupSharedService(t *testing.T, noOfBlocksToGenerate int) (*eth.Ethereum, *SharedServiceContainer, ed25519.PrivateKey, ed25519.PublicKey) { t.Helper() - genesis, blocks, bridgeAddress, feeCollectorKey := GenerateMergeChain(noOfBlocksToGenerate, true) + genesis, blocks, bridgeAddress, feeCollectorKey, auctioneerPrivKey, auctioneerPubKey := 
GenerateMergeChain(noOfBlocksToGenerate, true) ethservice := StartEthService(t, genesis) sharedService, err := NewSharedServiceContainer(ethservice) @@ -28,5 +29,8 @@ func SetupSharedService(t *testing.T, noOfBlocksToGenerate int) (*eth.Ethereum, _, err = ethservice.BlockChain().InsertChain(blocks) require.Nil(t, err, "can't insert blocks") - return ethservice, sharedService + // FIXME - this interface isn't right for the tests, we shouldn't be exposing the auctioneer priv key like this + // we should instead allow the test to create it and pass it to the shared service container in the constructor + // but that can make the codebase a bit weird, so we can leave it like this for now + return ethservice, sharedService, auctioneerPrivKey, auctioneerPubKey } diff --git a/grpc/shared/test_utils.go b/grpc/shared/test_utils.go index 82033ea5c..69926120f 100644 --- a/grpc/shared/test_utils.go +++ b/grpc/shared/test_utils.go @@ -3,6 +3,7 @@ package shared import ( primitivev1 "buf.build/gen/go/astria/primitives/protocolbuffers/go/astria/primitive/v1" "crypto/ecdsa" + "crypto/ed25519" "math/big" "testing" "time" @@ -36,7 +37,7 @@ var ( testBalance = big.NewInt(2e18) ) -func GenerateMergeChain(n int, merged bool) (*core.Genesis, []*types.Block, string, *ecdsa.PrivateKey) { +func GenerateMergeChain(n int, merged bool) (*core.Genesis, []*types.Block, string, *ecdsa.PrivateKey, ed25519.PrivateKey, ed25519.PublicKey) { config := *params.AllEthashProtocolChanges engine := consensus.Engine(beaconConsensus.New(ethash.NewFaker())) if merged { @@ -61,6 +62,18 @@ func GenerateMergeChain(n int, merged bool) (*core.Genesis, []*types.Block, stri config.AstriaCelestiaInitialHeight = 10 config.AstriaCelestiaHeightVariance = 10 + auctioneerPubKey, auctioneerPrivKey, err := ed25519.GenerateKey(nil) + if err != nil { + panic(err) + } + auctioneerAddress, err := EncodeFromPublicKey(config.AstriaSequencerAddressPrefix, auctioneerPubKey) + if err != nil { + panic(err) + } + + 
config.AstriaAuctioneerAddresses = make(map[uint32]string) + config.AstriaAuctioneerAddresses[1] = auctioneerAddress + bech32mBridgeAddress, err := bech32.EncodeM(config.AstriaSequencerAddressPrefix, bridgeAddressBytes) if err != nil { panic(err) @@ -114,12 +127,14 @@ func GenerateMergeChain(n int, merged bool) (*core.Genesis, []*types.Block, stri config.TerminalTotalDifficulty = totalDifficulty } - return genesis, blocks, bech32mBridgeAddress, feeCollectorKey + return genesis, blocks, bech32mBridgeAddress, feeCollectorKey, auctioneerPrivKey, auctioneerPubKey } // startEthService creates a full node instance for testing. func StartEthService(t *testing.T, genesis *core.Genesis) *eth.Ethereum { - n, err := node.New(&node.Config{}) + n, err := node.New(&node.Config{ + EnableAuctioneer: true, + }) require.Nil(t, err, "can't create node") mcfg := miner.DefaultConfig mcfg.PendingFeeRecipient = TestAddr diff --git a/grpc/shared/validation.go b/grpc/shared/validation.go index ccb0a9961..90ae13619 100644 --- a/grpc/shared/validation.go +++ b/grpc/shared/validation.go @@ -1,18 +1,40 @@ package shared import ( + auctionv1alpha1 "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/auction/v1alpha1" primitivev1 "buf.build/gen/go/astria/primitives/protocolbuffers/go/astria/primitive/v1" sequencerblockv1 "buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go/astria/sequencerblock/v1" + "bytes" + "crypto/ed25519" "crypto/sha256" + "errors" "fmt" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/contracts" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/params" + "github.com/golang/protobuf/proto" + proto2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" "math/big" + "time" ) +var ( + successfulUnbundledAllocations = 
metrics.GetOrRegisterGauge("astria/optimistic/successful_unbundled_allocations", nil) + allocationsWithInvalidPrevBlockHash = metrics.GetOrRegisterGauge("astria/optimistic/allocations_with_invalid_prev_block_hash", nil) + allocationsWithInvalidPubKey = metrics.GetOrRegisterGauge("astria/optimistic/allocations_with_invalid_pub_key", nil) + allocationsWithInvalidSignature = metrics.GetOrRegisterGauge("astria/optimistic/allocations_with_invalid_signature", nil) + + allocationUnbundlingTimer = metrics.GetOrRegisterTimer("astria/optimistic/allocation_unbundling_time", nil) +) + +func WrapError(err error, msg string) error { + return fmt.Errorf("%s: %w", msg, err) +} + func protoU128ToBigInt(u128 *primitivev1.Uint128) *big.Int { lo := big.NewInt(0).SetUint64(u128.Lo) hi := big.NewInt(0).SetUint64(u128.Hi) @@ -20,95 +42,212 @@ func protoU128ToBigInt(u128 *primitivev1.Uint128) *big.Int { return lo.Add(lo, hi) } -// `validateAndUnmarshalSequencerTx` validates and unmarshals the given rollup sequencer transaction. -// If the sequencer transaction is a deposit tx, we ensure that the asset ID is allowed and the bridge address is known. -// If the sequencer transaction is not a deposit tx, we unmarshal the sequenced data into an Ethereum transaction. We ensure that the -// tx is not a blob tx or a deposit tx. 
-func ValidateAndUnmarshalSequencerTx( +func validateAndUnmarshalDepositTx( + deposit *sequencerblockv1.Deposit, height uint64, - tx *sequencerblockv1.RollupData, bridgeAddresses map[string]*params.AstriaBridgeAddressConfig, - bridgeAllowedAssets map[string]struct{}, -) (*types.Transaction, error) { - if deposit := tx.GetDeposit(); deposit != nil { - bridgeAddress := deposit.BridgeAddress.GetBech32M() - bac, ok := bridgeAddresses[bridgeAddress] - if !ok { - return nil, fmt.Errorf("unknown bridge address: %s", bridgeAddress) - } - - if height < uint64(bac.StartHeight) { - return nil, fmt.Errorf("bridging asset %s from bridge %s not allowed before height %d", bac.AssetDenom, bridgeAddress, bac.StartHeight) - } - - if _, ok := bridgeAllowedAssets[deposit.Asset]; !ok { - return nil, fmt.Errorf("disallowed asset %s in deposit tx", deposit.Asset) - } + bridgeAllowedAssets map[string]struct{}) (*types.Transaction, error) { + bridgeAddress := deposit.BridgeAddress.GetBech32M() + bac, ok := bridgeAddresses[bridgeAddress] + if !ok { + return nil, fmt.Errorf("unknown bridge address: %s", bridgeAddress) + } - if deposit.Asset != bac.AssetDenom { - return nil, fmt.Errorf("asset %s does not match bridge address %s asset", deposit.Asset, bridgeAddress) - } + if height < uint64(bac.StartHeight) { + return nil, fmt.Errorf("bridging asset %s from bridge %s not allowed before height %d", bac.AssetDenom, bridgeAddress, bac.StartHeight) + } - recipient := common.HexToAddress(deposit.DestinationChainAddress) - amount := bac.ScaledDepositAmount(protoU128ToBigInt(deposit.Amount)) + if _, ok := bridgeAllowedAssets[deposit.Asset]; !ok { + return nil, fmt.Errorf("disallowed asset %s in deposit tx", deposit.Asset) + } - if bac.Erc20Asset != nil { - log.Debug("creating deposit tx to mint ERC20 asset", "token", bac.AssetDenom, "erc20Address", bac.Erc20Asset.ContractAddress) - abi, err := contracts.AstriaBridgeableERC20MetaData.GetAbi() - if err != nil { - // this should never happen, as the abi 
is hardcoded in the contract bindings - return nil, fmt.Errorf("failed to get abi for erc20 contract for asset %s: %w", bac.AssetDenom, err) - } + if deposit.Asset != bac.AssetDenom { + return nil, fmt.Errorf("asset %s does not match bridge address %s asset", deposit.Asset, bridgeAddress) + } - // pack arguments for calling the `mint` function on the ERC20 contract - args := []interface{}{recipient, amount} - calldata, err := abi.Pack("mint", args...) - if err != nil { - return nil, err - } + recipient := common.HexToAddress(deposit.DestinationChainAddress) + amount := bac.ScaledDepositAmount(protoU128ToBigInt(deposit.Amount)) - txdata := types.DepositTx{ - From: bac.SenderAddress, - Value: new(big.Int), // don't need to set this, as we aren't minting the native asset - // mints cost ~14k gas, however this can vary based on existing storage, so we add a little extra as buffer. - // - // the fees are spent from the "bridge account" which is not actually a real account, but is instead some - // address defined by consensus, so the gas cost is not actually deducted from any account. - Gas: 64000, - To: &bac.Erc20Asset.ContractAddress, - Data: calldata, - SourceTransactionId: *deposit.SourceTransactionId, - SourceTransactionIndex: deposit.SourceActionIndex, - } + if bac.Erc20Asset != nil { + log.Debug("creating deposit tx to mint ERC20 asset", "token", bac.AssetDenom, "erc20Address", bac.Erc20Asset.ContractAddress) + abi, err := contracts.AstriaBridgeableERC20MetaData.GetAbi() + if err != nil { + // this should never happen, as the abi is hardcoded in the contract bindings + return nil, fmt.Errorf("failed to get abi for erc20 contract for asset %s: %w", bac.AssetDenom, err) + } - tx := types.NewTx(&txdata) - return tx, nil + // pack arguments for calling the `mint` function on the ERC20 contract + args := []interface{}{recipient, amount} + calldata, err := abi.Pack("mint", args...) 
+ if err != nil { + return nil, err } txdata := types.DepositTx{ - From: bac.SenderAddress, - To: &recipient, - Value: amount, - Gas: 0, + From: bac.SenderAddress, + Value: new(big.Int), // don't need to set this, as we aren't minting the native asset + // mints cost ~14k gas, however this can vary based on existing storage, so we add a little extra as buffer. + // + // the fees are spent from the "bridge account" which is not actually a real account, but is instead some + // address defined by consensus, so the gas cost is not actually deducted from any account. + Gas: 64000, + To: &bac.Erc20Asset.ContractAddress, + Data: calldata, SourceTransactionId: *deposit.SourceTransactionId, SourceTransactionIndex: deposit.SourceActionIndex, } - return types.NewTx(&txdata), nil - } else { - ethTx := new(types.Transaction) - err := ethTx.UnmarshalBinary(tx.GetSequencedData()) + + tx := types.NewTx(&txdata) + return tx, nil + } + + txdata := types.DepositTx{ + From: bac.SenderAddress, + To: &recipient, + Value: amount, + Gas: 0, + SourceTransactionId: *deposit.SourceTransactionId, + SourceTransactionIndex: deposit.SourceActionIndex, + } + return types.NewTx(&txdata), nil +} + +func validateAndUnmarshallSequenceAction(tx *sequencerblockv1.RollupData) (*types.Transaction, error) { + ethTx := new(types.Transaction) + err := ethTx.UnmarshalBinary(tx.GetSequencedData()) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal sequenced data into transaction: %w. tx hash: %s", err, sha256.Sum256(tx.GetSequencedData())) + } + + if ethTx.Type() == types.DepositTxType { + return nil, fmt.Errorf("deposit tx not allowed in sequenced data. tx hash: %s", sha256.Sum256(tx.GetSequencedData())) + } + + if ethTx.Type() == types.BlobTxType { + return nil, fmt.Errorf("blob tx not allowed in sequenced data. 
tx hash: %s", sha256.Sum256(tx.GetSequencedData())) + } + + return ethTx, nil +} + +func unmarshallAllocationTxs(allocation *auctionv1alpha1.Allocation, prevBlockHash []byte, auctioneerBech32Address string, addressPrefix string) (types.Transactions, error) { + unbundlingStart := time.Now() + defer allocationUnbundlingTimer.UpdateSince(unbundlingStart) + + processedTxs := types.Transactions{} + bid := &auctionv1alpha1.Bid{} + + unprocessedBid := allocation.GetBid() + + err := anypb.UnmarshalTo(unprocessedBid, bid, proto2.UnmarshalOptions{ + Merge: false, + AllowPartial: false, + }) + if err != nil { + return nil, WrapError(err, "failed to unmarshal bid") + } + + log.Debug("Found a potential allocation in the rollup data. Checking if it is valid.", "prevBlockHash", common.BytesToHash(prevBlockHash).String(), "auctioneerBech32Address", auctioneerBech32Address) + + if !bytes.Equal(bid.GetRollupParentBlockHash(), prevBlockHash) { + allocationsWithInvalidPrevBlockHash.Inc(1) + return nil, errors.New("prev block hash in allocation does not match the previous block hash") + } + + publicKey := ed25519.PublicKey(allocation.GetPublicKey()) + bech32Address, err := EncodeFromPublicKey(addressPrefix, publicKey) + if err != nil { + return nil, WrapError(err, fmt.Sprintf("failed to encode public key to bech32m address: %s", publicKey)) + } + + if auctioneerBech32Address != bech32Address { + allocationsWithInvalidPubKey.Inc(1) + return nil, fmt.Errorf("address in allocation does not match auctioneer address. expected: %s, got: %s", auctioneerBech32Address, bech32Address) + } + + message, err := proto.Marshal(bid) + if err != nil { + return nil, WrapError(err, "failed to marshal allocation to verify signature") + } + + signature := allocation.GetSignature() + if !ed25519.Verify(publicKey, message, signature) { + allocationsWithInvalidSignature.Inc(1) + return nil, fmt.Errorf("signature in allocation does not match the public key") + } + + log.Debug("Allocation is valid. 
Unmarshalling the transactions in the bid.") + // unmarshall the transactions in the bid + for _, allocationTx := range bid.GetTransactions() { + ethtx := new(types.Transaction) + err := ethtx.UnmarshalBinary(allocationTx) if err != nil { - return nil, fmt.Errorf("failed to unmarshal sequenced data into transaction: %w. tx hash: %s", err, sha256.Sum256(tx.GetSequencedData())) + return nil, WrapError(err, "failed to unmarshall allocation transaction") } + processedTxs = append(processedTxs, ethtx) + } - if ethTx.Type() == types.DepositTxType { - return nil, fmt.Errorf("deposit tx not allowed in sequenced data. tx hash: %s", sha256.Sum256(tx.GetSequencedData())) - } + successfulUnbundledAllocations.Inc(1) - if ethTx.Type() == types.BlobTxType { - return nil, fmt.Errorf("blob tx not allowed in sequenced data. tx hash: %s", sha256.Sum256(tx.GetSequencedData())) - } + return processedTxs, nil + +} + +// `UnbundleRollupDataTransactions` takes in a list of rollup data transactions and returns a list of Ethereum transactions. +// TODO - this function has become too big. 
we should start breaking it down +func UnbundleRollupDataTransactions(txs []*sequencerblockv1.RollupData, height uint64, bridgeAddresses map[string]*params.AstriaBridgeAddressConfig, + bridgeAllowedAssets map[string]struct{}, prevBlockHash []byte, auctioneerBech32Address string, addressPrefix string) types.Transactions { - return ethTx, nil + processedTxs := types.Transactions{} + allocationTxs := types.Transactions{} + // we just return the allocation here and do not unmarshall the transactions in the bid if we find it + var allocation *auctionv1alpha1.Allocation + for _, tx := range txs { + if deposit := tx.GetDeposit(); deposit != nil { + depositTx, err := validateAndUnmarshalDepositTx(deposit, height, bridgeAddresses, bridgeAllowedAssets) + if err != nil { + log.Error("failed to validate and unmarshal deposit tx", "error", err) + continue + } + + processedTxs = append(processedTxs, depositTx) + } else { + sequenceData := tx.GetSequencedData() + // check if sequence data is of type Allocation + if allocation == nil { + // TODO - check if we can avoid a temp value + tempAllocation := &auctionv1alpha1.Allocation{} + err := proto.Unmarshal(sequenceData, tempAllocation) + if err == nil { + unmarshalledAllocationTxs, err := unmarshallAllocationTxs(tempAllocation, prevBlockHash, auctioneerBech32Address, addressPrefix) + if err != nil { + log.Error("failed to unmarshall allocation transactions", "error", err) + continue + } + + allocation = tempAllocation + allocationTxs = unmarshalledAllocationTxs + } else { + ethtx, err := validateAndUnmarshallSequenceAction(tx) + if err != nil { + log.Error("failed to unmarshall sequence action", "error", err) + continue + } + processedTxs = append(processedTxs, ethtx) + } + } else { + ethtx, err := validateAndUnmarshallSequenceAction(tx) + if err != nil { + log.Error("failed to unmarshall sequence action", "error", err) + continue + } + processedTxs = append(processedTxs, ethtx) + } + } } + + // prepend allocation txs to 
processedTxs + processedTxs = append(allocationTxs, processedTxs...) + + return processedTxs } diff --git a/grpc/shared/validation_test.go b/grpc/shared/validation_test.go index a46032f4e..3292d5334 100644 --- a/grpc/shared/validation_test.go +++ b/grpc/shared/validation_test.go @@ -1,6 +1,12 @@ package shared import ( + auctionv1alpha1 "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/auction/v1alpha1" + "bytes" + "crypto/ecdsa" + "crypto/ed25519" + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" "math/big" "testing" @@ -15,6 +21,34 @@ import ( "github.com/stretchr/testify/require" ) +type allocationInfo struct { + signature []byte + publicKey []byte + bid *auctionv1alpha1.Bid +} + +func (a *allocationInfo) convertToAllocation() (*auctionv1alpha1.Allocation, error) { + convertedBid, err := anypb.New(a.bid) + if err != nil { + return nil, err + } + + return &auctionv1alpha1.Allocation{ + Signature: a.signature, + PublicKey: a.publicKey, + Bid: convertedBid, + }, nil +} + +func transaction(nonce uint64, gaslimit uint64, key *ecdsa.PrivateKey) *types.Transaction { + return pricedTransaction(nonce, gaslimit, big.NewInt(1), key) +} + +func pricedTransaction(nonce uint64, gaslimit uint64, gasprice *big.Int, key *ecdsa.PrivateKey) *types.Transaction { + tx, _ := types.SignTx(types.NewTransaction(nonce, common.Address{}, big.NewInt(100), gaslimit, gasprice, nil), types.HomesteadSigner{}, key) + return tx +} + func bigIntToProtoU128(i *big.Int) *primitivev1.Uint128 { lo := i.Uint64() hi := new(big.Int).Rsh(i, 64).Uint64() @@ -58,21 +92,136 @@ func generateBech32MAddress() string { return bech32m } -func TestSequenceTxValidation(t *testing.T) { - ethservice, serviceV1Alpha1 := SetupSharedService(t, 10) +func TestUnmarshallAllocationTxs(t *testing.T) { + ethService, serviceV1Alpha1, auctioneerPrivKey, auctioneerPubKey := SetupSharedService(t, 10) + addressPrefix := 
ethService.BlockChain().Config().AstriaSequencerAddressPrefix - blobTx, err := testBlobTx().MarshalBinary() - require.Nil(t, err, "failed to marshal random blob tx: %v", err) + tx1 := transaction(0, 1000, TestKey) + validMarshalledTx1, err := tx1.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) - depositTx, err := testDepositTx().MarshalBinary() - require.Nil(t, err, "failed to marshal random deposit tx: %v", err) + tx2 := transaction(1, 1000, TestKey) + validMarshalledTx2, err := tx2.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + + tx3 := transaction(2, 1000, TestKey) + validMarshalledTx3, err := tx3.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) - unsignedTx := types.NewTransaction(uint64(0), common.HexToAddress("0x9a9070028361F7AAbeB3f2F2Dc07F82C4a98A02a"), big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil) - tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), TestKey) - require.Nil(t, err, "failed to sign tx: %v", err) + validBid := &auctionv1alpha1.Bid{ + Fee: 100, + Transactions: [][]byte{validMarshalledTx1, validMarshalledTx2, validMarshalledTx3}, + SequencerParentBlockHash: []byte("sequencer block hash"), + RollupParentBlockHash: []byte("prev rollup block hash"), + } + + marshalledAllocation, err := proto.Marshal(validBid) + require.NoError(t, err, "failed to marshal payload: %v", err) + + signedAllocation, err := auctioneerPrivKey.Sign(nil, marshalledAllocation, &ed25519.Options{ + Hash: 0, + Context: "", + }) + require.NoError(t, err, "failed to sign allocation: %v", err) - validMarshalledTx, err := tx.MarshalBinary() - require.Nil(t, err, "failed to marshal valid tx: %v", err) + tests := []struct { + description string + + allocationInfo allocationInfo + + prevBlockHash []byte + expectedOutput types.Transactions + // just check if error contains the string since error contains other details + 
wantErr string + }{ + { + description: "previous block hash mismatch", + allocationInfo: allocationInfo{ + signature: make([]byte, 0), + publicKey: make([]byte, 0), + bid: &auctionv1alpha1.Bid{ + Fee: 100, + Transactions: [][]byte{[]byte("unmarshallable tx")}, + SequencerParentBlockHash: []byte("sequencer block hash"), + RollupParentBlockHash: []byte("prev rollup block hash"), + }, + }, + prevBlockHash: []byte("not prev rollup block hash"), + expectedOutput: types.Transactions{}, + wantErr: "prev block hash in allocation does not match the previous block hash", + }, + { + description: "public key doesn't match", + allocationInfo: allocationInfo{ + signature: []byte("invalid signature"), + publicKey: []byte("invalid public key"), + bid: &auctionv1alpha1.Bid{ + Fee: 100, + Transactions: [][]byte{[]byte("unmarshallable tx")}, + SequencerParentBlockHash: []byte("sequencer block hash"), + RollupParentBlockHash: []byte("prev rollup block hash"), + }, + }, + prevBlockHash: []byte("prev rollup block hash"), + expectedOutput: types.Transactions{}, + wantErr: "address in allocation does not match auctioneer address", + }, + { + description: "invalid signature", + allocationInfo: allocationInfo{ + signature: []byte("invalid signature"), + publicKey: auctioneerPubKey, + bid: &auctionv1alpha1.Bid{ + Fee: 100, + Transactions: [][]byte{[]byte("unmarshallable tx")}, + SequencerParentBlockHash: []byte("sequencer block hash"), + RollupParentBlockHash: []byte("prev rollup block hash"), + }, + }, + prevBlockHash: []byte("prev rollup block hash"), + expectedOutput: types.Transactions{}, + wantErr: "signature in allocation does not match the public key", + }, + { + description: "valid allocation", + allocationInfo: allocationInfo{ + signature: signedAllocation, + publicKey: auctioneerPubKey, + bid: validBid, + }, + prevBlockHash: []byte("prev rollup block hash"), + expectedOutput: types.Transactions{tx1, tx2, tx3}, + wantErr: "", + }, + } + + for _, test := range tests { + 
t.Run(test.description, func(t *testing.T) { + allocation, err := test.allocationInfo.convertToAllocation() + require.NoError(t, err, "failed to convert allocation info to allocation: %v", err) + + finalTxs, err := unmarshallAllocationTxs(allocation, test.prevBlockHash, serviceV1Alpha1.AuctioneerAddress(), addressPrefix) + if test.wantErr == "" && err == nil { + for _, tx := range test.expectedOutput { + foundTx := false + for _, finalTx := range finalTxs { + if bytes.Equal(finalTx.Hash().Bytes(), tx.Hash().Bytes()) { + foundTx = true + } + } + + require.True(t, foundTx, "expected tx not found in final txs") + } + return + } + require.False(t, test.wantErr == "" && err != nil, "expected error, got nil") + require.Contains(t, err.Error(), test.wantErr) + }) + } +} + +func TestValidateAndUnmarshallDepositTx(t *testing.T) { + ethservice, serviceV1Alpha1, _, _ := SetupSharedService(t, 10) chainDestinationKey, err := crypto.GenerateKey() require.Nil(t, err, "failed to generate chain destination key: %v", err) @@ -92,40 +241,13 @@ func TestSequenceTxValidation(t *testing.T) { tests := []struct { description string - sequencerTx *sequencerblockv1.RollupData + sequencerTx *sequencerblockv1.Deposit // just check if error contains the string since error contains other details wantErr string }{ - { - description: "unmarshallable sequencer tx", - sequencerTx: &sequencerblockv1.RollupData{ - Value: &sequencerblockv1.RollupData_SequencedData{ - SequencedData: []byte("unmarshallable tx"), - }, - }, - wantErr: "failed to unmarshal sequenced data into transaction", - }, - { - description: "blob type sequence tx", - sequencerTx: &sequencerblockv1.RollupData{ - Value: &sequencerblockv1.RollupData_SequencedData{ - SequencedData: blobTx, - }, - }, - wantErr: "blob tx not allowed in sequenced data", - }, - { - description: "deposit type sequence tx", - sequencerTx: &sequencerblockv1.RollupData{ - Value: &sequencerblockv1.RollupData_SequencedData{ - SequencedData: depositTx, - }, - }, - 
wantErr: "deposit tx not allowed in sequenced data", - }, { description: "deposit tx with an unknown bridge address", - sequencerTx: &sequencerblockv1.RollupData{Value: &sequencerblockv1.RollupData_Deposit{Deposit: &sequencerblockv1.Deposit{ + sequencerTx: &sequencerblockv1.Deposit{ BridgeAddress: &primitivev1.Address{ Bech32M: generateBech32MAddress(), }, @@ -137,12 +259,12 @@ func TestSequenceTxValidation(t *testing.T) { Inner: "test_tx_hash", }, SourceActionIndex: 0, - }}}, + }, wantErr: "unknown bridge address", }, { description: "deposit tx with a disallowed asset id", - sequencerTx: &sequencerblockv1.RollupData{Value: &sequencerblockv1.RollupData_Deposit{Deposit: &sequencerblockv1.Deposit{ + sequencerTx: &sequencerblockv1.Deposit{ BridgeAddress: &primitivev1.Address{ Bech32M: bridgeAddress, }, @@ -154,12 +276,12 @@ func TestSequenceTxValidation(t *testing.T) { Inner: "test_tx_hash", }, SourceActionIndex: 0, - }}}, + }, wantErr: "disallowed asset", }, { description: "deposit tx with a height and asset below the bridge start height", - sequencerTx: &sequencerblockv1.RollupData{Value: &sequencerblockv1.RollupData_Deposit{Deposit: &sequencerblockv1.Deposit{ + sequencerTx: &sequencerblockv1.Deposit{ BridgeAddress: &primitivev1.Address{ Bech32M: invalidHeightBridgeAddressBech32m, }, @@ -171,12 +293,12 @@ func TestSequenceTxValidation(t *testing.T) { Inner: "test_tx_hash", }, SourceActionIndex: 0, - }}}, + }, wantErr: "not allowed before height", }, { description: "valid deposit tx", - sequencerTx: &sequencerblockv1.RollupData{Value: &sequencerblockv1.RollupData_Deposit{Deposit: &sequencerblockv1.Deposit{ + sequencerTx: &sequencerblockv1.Deposit{ BridgeAddress: &primitivev1.Address{ Bech32M: bridgeAddress, }, @@ -188,9 +310,67 @@ func TestSequenceTxValidation(t *testing.T) { Inner: "test_tx_hash", }, SourceActionIndex: 0, - }}}, + }, wantErr: "", }, + } + + for _, test := range tests { + t.Run(test.description, func(t *testing.T) { + _, err := 
validateAndUnmarshalDepositTx(test.sequencerTx, 2, serviceV1Alpha1.BridgeAddresses(), serviceV1Alpha1.BridgeAllowedAssets()) + if test.wantErr == "" && err == nil { + return + } + require.False(t, test.wantErr == "" && err != nil, "expected error, got nil") + require.Contains(t, err.Error(), test.wantErr) + }) + } +} + +func TestValidateAndUnmarshallSequenceAction(t *testing.T) { + blobTx, err := testBlobTx().MarshalBinary() + require.Nil(t, err, "failed to marshal random blob tx: %v", err) + + depositTx, err := testDepositTx().MarshalBinary() + require.Nil(t, err, "failed to marshal random deposit tx: %v", err) + + tx1 := transaction(0, 1000, TestKey) + validMarshalledTx, err := tx1.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + + tests := []struct { + description string + sequencerTx *sequencerblockv1.RollupData + // just check if error contains the string since errors can contains other details + wantErr string + }{ + { + description: "unmarshallable sequencer tx", + sequencerTx: &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{ + SequencedData: []byte("unmarshallable tx"), + }, + }, + wantErr: "failed to unmarshal sequenced data into transaction", + }, + { + description: "blob type sequence tx", + sequencerTx: &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{ + SequencedData: blobTx, + }, + }, + wantErr: "blob tx not allowed in sequenced data", + }, + { + description: "deposit type sequence tx", + sequencerTx: &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{ + SequencedData: depositTx, + }, + }, + wantErr: "deposit tx not allowed in sequenced data", + }, { description: "valid sequencer tx", sequencerTx: &sequencerblockv1.RollupData{ @@ -202,7 +382,7 @@ func TestSequenceTxValidation(t *testing.T) { for _, test := range tests { t.Run(test.description, func(t *testing.T) { - _, err := ValidateAndUnmarshalSequencerTx(2, 
test.sequencerTx, serviceV1Alpha1.BridgeAddresses(), serviceV1Alpha1.BridgeAllowedAssets()) + _, err := validateAndUnmarshallSequenceAction(test.sequencerTx) if test.wantErr == "" && err == nil { return } @@ -211,3 +391,357 @@ func TestSequenceTxValidation(t *testing.T) { }) } } + +func TestUnbundleRollupData(t *testing.T) { + ethservice, serviceV1Alpha1, auctioneerPrivKey, auctioneerPubKey := SetupSharedService(t, 10) + + addressPrefix := ethservice.BlockChain().Config().AstriaSequencerAddressPrefix + + baseSequencerBlockHash := []byte("sequencer block hash") + prevRollupBlockHash := []byte("prev rollup block hash") + + // txs in + tx1 := transaction(0, 1000, TestKey) + tx2 := transaction(1, 1000, TestKey) + tx3 := transaction(2, 1000, TestKey) + tx4 := transaction(3, 1000, TestKey) + tx5 := transaction(4, 1000, TestKey) + + validMarshalledTx1, err := tx1.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + validMarshalledTx2, err := tx2.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + validMarshalledTx3, err := tx3.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + validMarshalledTx4, err := tx4.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + validMarshalledTx5, err := tx5.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + + bid := &auctionv1alpha1.Bid{ + Fee: 100, + Transactions: [][]byte{validMarshalledTx1, validMarshalledTx2, validMarshalledTx3}, + SequencerParentBlockHash: baseSequencerBlockHash, + RollupParentBlockHash: prevRollupBlockHash, + } + + marshalledBid, err := proto.Marshal(bid) + require.NoError(t, err, "failed to marshal payload: %v", err) + signedBid, err := auctioneerPrivKey.Sign(nil, marshalledBid, &ed25519.Options{ + Hash: 0, + Context: "", + }) + require.NoError(t, err, "failed to sign payload: %v", err) + + // TODO - we need better naming here! 
+ finalBid, err := anypb.New(bid) + + allocation := &auctionv1alpha1.Allocation{ + Signature: signedBid, + PublicKey: auctioneerPubKey, + Bid: finalBid, + } + + marshalledAllocation, err := proto.Marshal(allocation) + require.NoError(t, err, "failed to marshal allocation: %v", err) + allocationSequenceData := &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{ + SequencedData: marshalledAllocation, + }, + } + seqData1 := &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{ + SequencedData: validMarshalledTx4, + }, + } + seqData2 := &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{ + SequencedData: validMarshalledTx5, + }, + } + + bridgeAddress := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].BridgeAddress + bridgeAssetDenom := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].AssetDenom + chainDestinationKey, err := crypto.GenerateKey() + require.Nil(t, err, "failed to generate chain destination key: %v", err) + chainDestinationAddress := crypto.PubkeyToAddress(chainDestinationKey.PublicKey) + + depositTx := &sequencerblockv1.RollupData{Value: &sequencerblockv1.RollupData_Deposit{Deposit: &sequencerblockv1.Deposit{ + BridgeAddress: &primitivev1.Address{ + Bech32M: bridgeAddress, + }, + Asset: bridgeAssetDenom, + Amount: bigIntToProtoU128(big.NewInt(1000000000000000000)), + RollupId: &primitivev1.RollupId{Inner: make([]byte, 0)}, + DestinationChainAddress: chainDestinationAddress.String(), + SourceTransactionId: &primitivev1.TransactionId{ + Inner: "test_tx_hash", + }, + SourceActionIndex: 0, + }}} + + finalTxs := []*sequencerblockv1.RollupData{seqData1, seqData2, allocationSequenceData, depositTx} + + txsToProcess := UnbundleRollupDataTransactions(finalTxs, 2, serviceV1Alpha1.BridgeAddresses(), serviceV1Alpha1.BridgeAllowedAssets(), prevRollupBlockHash, serviceV1Alpha1.AuctioneerAddress(), addressPrefix) + + require.Equal(t, 
txsToProcess.Len(), 6, "expected 6 txs to process") + + // allocation txs should be the first 3 + require.True(t, bytes.Equal(txsToProcess[0].Hash().Bytes(), tx1.Hash().Bytes()), "expected tx1 to be first") + require.True(t, bytes.Equal(txsToProcess[1].Hash().Bytes(), tx2.Hash().Bytes()), "expected tx2 to be second") + require.True(t, bytes.Equal(txsToProcess[2].Hash().Bytes(), tx3.Hash().Bytes()), "expected tx3 to be third") + require.True(t, bytes.Equal(txsToProcess[3].Hash().Bytes(), tx4.Hash().Bytes()), "expected tx4 to be fourth") + require.True(t, bytes.Equal(txsToProcess[4].Hash().Bytes(), tx5.Hash().Bytes()), "expected tx5 to be fifth") +} + +func TestUnbundleRollupDataWithDuplicateAllocations(t *testing.T) { + ethservice, serviceV1Alpha1, auctioneerPrivKey, auctioneerPubKey := SetupSharedService(t, 10) + addressPrefix := ethservice.BlockChain().Config().AstriaSequencerAddressPrefix + + baseSequencerBlockHash := []byte("sequencer block hash") + prevRollupBlockHash := []byte("prev rollup block hash") + + // txs in + tx1 := transaction(0, 1000, TestKey) + tx2 := transaction(1, 1000, TestKey) + tx3 := transaction(2, 1000, TestKey) + tx4 := transaction(3, 1000, TestKey) + tx5 := transaction(4, 1000, TestKey) + + validMarshalledTx1, err := tx1.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + validMarshalledTx2, err := tx2.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + validMarshalledTx3, err := tx3.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + validMarshalledTx4, err := tx4.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + validMarshalledTx5, err := tx5.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + + bid := &auctionv1alpha1.Bid{ + Fee: 100, + Transactions: [][]byte{validMarshalledTx1, validMarshalledTx2, validMarshalledTx3}, + SequencerParentBlockHash: baseSequencerBlockHash, + 
RollupParentBlockHash: prevRollupBlockHash, + } + + marshalledBid, err := proto.Marshal(bid) + require.NoError(t, err, "failed to marshal payload: %v", err) + signedPayload, err := auctioneerPrivKey.Sign(nil, marshalledBid, &ed25519.Options{ + Hash: 0, + Context: "", + }) + require.NoError(t, err, "failed to sign payload: %v", err) + + finalBid, err := anypb.New(bid) + require.NoError(t, err, "failed to convert bid to anypb: %v", err) + + allocation := &auctionv1alpha1.Allocation{ + Signature: signedPayload, + PublicKey: auctioneerPubKey, + Bid: finalBid, + } + + marshalledAllocation, err := proto.Marshal(allocation) + require.NoError(t, err, "failed to marshal allocation: %v", err) + allocationSequenceData := &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{ + SequencedData: marshalledAllocation, + }, + } + // this allocation should be ignored + allocationSequenceData2 := &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{ + SequencedData: marshalledAllocation, + }, + } + seqData1 := &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{ + SequencedData: validMarshalledTx4, + }, + } + seqData2 := &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{ + SequencedData: validMarshalledTx5, + }, + } + + bridgeAddress := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].BridgeAddress + bridgeAssetDenom := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].AssetDenom + chainDestinationKey, err := crypto.GenerateKey() + require.Nil(t, err, "failed to generate chain destination key: %v", err) + chainDestinationAddress := crypto.PubkeyToAddress(chainDestinationKey.PublicKey) + + depositTx := &sequencerblockv1.RollupData{Value: &sequencerblockv1.RollupData_Deposit{Deposit: &sequencerblockv1.Deposit{ + BridgeAddress: &primitivev1.Address{ + Bech32M: bridgeAddress, + }, + Asset: bridgeAssetDenom, + Amount: 
bigIntToProtoU128(big.NewInt(1000000000000000000)), + RollupId: &primitivev1.RollupId{Inner: make([]byte, 0)}, + DestinationChainAddress: chainDestinationAddress.String(), + SourceTransactionId: &primitivev1.TransactionId{ + Inner: "test_tx_hash", + }, + SourceActionIndex: 0, + }}} + + finalTxs := []*sequencerblockv1.RollupData{seqData1, seqData2, allocationSequenceData, allocationSequenceData2, depositTx} + + txsToProcess := UnbundleRollupDataTransactions(finalTxs, 2, serviceV1Alpha1.BridgeAddresses(), serviceV1Alpha1.BridgeAllowedAssets(), prevRollupBlockHash, serviceV1Alpha1.AuctioneerAddress(), addressPrefix) + + require.Equal(t, txsToProcess.Len(), 6, "expected 6 txs to process") + + // allocation txs should be the first 3 + require.True(t, bytes.Equal(txsToProcess[0].Hash().Bytes(), tx1.Hash().Bytes()), "expected tx1 to be first") + require.True(t, bytes.Equal(txsToProcess[1].Hash().Bytes(), tx2.Hash().Bytes()), "expected tx2 to be second") + require.True(t, bytes.Equal(txsToProcess[2].Hash().Bytes(), tx3.Hash().Bytes()), "expected tx3 to be third") + require.True(t, bytes.Equal(txsToProcess[3].Hash().Bytes(), tx4.Hash().Bytes()), "expected tx4 to be fourth") + require.True(t, bytes.Equal(txsToProcess[4].Hash().Bytes(), tx5.Hash().Bytes()), "expected tx5 to be fifth") +} + +func TestUnbundleRollupDataWithDuplicateInvalidAllocations(t *testing.T) { + ethservice, serviceV1Alpha1, auctioneerPrivKey, auctioneerPubKey := SetupSharedService(t, 10) + addressPrefix := ethservice.BlockChain().Config().AstriaSequencerAddressPrefix + + baseSequencerBlockHash := []byte("sequencer block hash") + prevRollupBlockHash := []byte("prev rollup block hash") + + _, invalidAuctioneerprivkey, err := ed25519.GenerateKey(nil) + require.Nil(t, err, "failed to generate invalid auctioneer key: %v", err) + + // txs in + tx1 := transaction(0, 1000, TestKey) + tx2 := transaction(1, 1000, TestKey) + tx3 := transaction(2, 1000, TestKey) + tx4 := transaction(3, 1000, TestKey) + tx5 := 
transaction(4, 1000, TestKey) + + validMarshalledTx1, err := tx1.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + validMarshalledTx2, err := tx2.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + validMarshalledTx3, err := tx3.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + validMarshalledTx4, err := tx4.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + validMarshalledTx5, err := tx5.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + + // transactions that the attacker is trying to get into the top of block + invalidTx1 := transaction(5, 1000, TestKey) + invalidMarshalledTx1, err := invalidTx1.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + invalidTx2 := transaction(6, 1000, TestKey) + invalidMarshalledTx2, err := invalidTx2.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + + bid := &auctionv1alpha1.Bid{ + Fee: 100, + Transactions: [][]byte{validMarshalledTx1, validMarshalledTx2, validMarshalledTx3}, + SequencerParentBlockHash: baseSequencerBlockHash, + RollupParentBlockHash: prevRollupBlockHash, + } + validBidAny, err := anypb.New(bid) + require.NoError(t, err, "failed to convert bid to anypb: %v", err) + + marshalledBid, err := proto.Marshal(bid) + require.NoError(t, err, "failed to marshal allocation: %v", err) + signedBid, err := auctioneerPrivKey.Sign(nil, marshalledBid, &ed25519.Options{ + Hash: 0, + Context: "", + }) + require.NoError(t, err, "failed to sign allocation: %v", err) + + invalidBid := &auctionv1alpha1.Bid{ + Fee: 100, + Transactions: [][]byte{invalidMarshalledTx1, invalidMarshalledTx2}, + SequencerParentBlockHash: baseSequencerBlockHash, + RollupParentBlockHash: prevRollupBlockHash, + } + invalidBidAny, err := anypb.New(invalidBid) + require.NoError(t, err, "failed to convert bid to anypb: %v", err) + + 
marshalledInvalidBid, err := proto.Marshal(invalidBid) + require.NoError(t, err, "failed to marshal invalid allocation: %v", err) + + signedInvalidBid, err := invalidAuctioneerprivkey.Sign(nil, marshalledInvalidBid, &ed25519.Options{ + Hash: 0, + Context: "", + }) + require.NoError(t, err, "failed to sign allocation: %v", err) + + allocation := &auctionv1alpha1.Allocation{ + Signature: signedBid, + PublicKey: auctioneerPubKey, + Bid: validBidAny, + } + + marshalledAllocation, err := proto.Marshal(allocation) + require.NoError(t, err, "failed to marshal allocation: %v", err) + + invalidAllocation := &auctionv1alpha1.Allocation{ + Signature: signedInvalidBid, + // trying to spoof the actual auctioneer key + PublicKey: auctioneerPubKey, + Bid: invalidBidAny, + } + marshalledInvalidAllocation, err := proto.Marshal(invalidAllocation) + require.NoError(t, err, "failed to marshal invalid allocation: %v", err) + + allocationSequenceData := &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{ + SequencedData: marshalledAllocation, + }, + } + // this allocation should be ignored + invalidAllocationSequenceData := &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{ + SequencedData: marshalledInvalidAllocation, + }, + } + seqData1 := &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{ + SequencedData: validMarshalledTx4, + }, + } + seqData2 := &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{ + SequencedData: validMarshalledTx5, + }, + } + + bridgeAddress := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].BridgeAddress + bridgeAssetDenom := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].AssetDenom + chainDestinationKey, err := crypto.GenerateKey() + require.Nil(t, err, "failed to generate chain destination key: %v", err) + chainDestinationAddress := crypto.PubkeyToAddress(chainDestinationKey.PublicKey) + + depositTx := 
&sequencerblockv1.RollupData{Value: &sequencerblockv1.RollupData_Deposit{Deposit: &sequencerblockv1.Deposit{ + BridgeAddress: &primitivev1.Address{ + Bech32M: bridgeAddress, + }, + Asset: bridgeAssetDenom, + Amount: bigIntToProtoU128(big.NewInt(1000000000000000000)), + RollupId: &primitivev1.RollupId{Inner: make([]byte, 0)}, + DestinationChainAddress: chainDestinationAddress.String(), + SourceTransactionId: &primitivev1.TransactionId{ + Inner: "test_tx_hash", + }, + SourceActionIndex: 0, + }}} + + finalTxs := []*sequencerblockv1.RollupData{seqData1, seqData2, allocationSequenceData, invalidAllocationSequenceData, depositTx} + + txsToProcess := UnbundleRollupDataTransactions(finalTxs, 2, serviceV1Alpha1.BridgeAddresses(), serviceV1Alpha1.BridgeAllowedAssets(), prevRollupBlockHash, serviceV1Alpha1.AuctioneerAddress(), addressPrefix) + + require.Equal(t, txsToProcess.Len(), 6, "expected 6 txs to process") + + // allocation txs should be the first 3 + require.True(t, bytes.Equal(txsToProcess[0].Hash().Bytes(), tx1.Hash().Bytes()), "expected tx1 to be first") + require.True(t, bytes.Equal(txsToProcess[1].Hash().Bytes(), tx2.Hash().Bytes()), "expected tx2 to be second") + require.True(t, bytes.Equal(txsToProcess[2].Hash().Bytes(), tx3.Hash().Bytes()), "expected tx3 to be third") + require.True(t, bytes.Equal(txsToProcess[3].Hash().Bytes(), tx4.Hash().Bytes()), "expected tx4 to be fourth") + require.True(t, bytes.Equal(txsToProcess[4].Hash().Bytes(), tx5.Hash().Bytes()), "expected tx5 to be fifth") +} diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index ffa20fb62..7086f8156 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -834,6 +834,7 @@ func (s *BlockChainAPI) GetHeaderByHash(ctx context.Context, hash common.Hash) m // - When blockNr is -2 the chain latest block is returned. // - When blockNr is -3 the chain finalized block is returned. // - When blockNr is -4 the chain safe block is returned. 
+// - When blockNr is -5 the chain optimistic block is returned. // - When fullTx is true all transactions in the block are returned, otherwise // only the transaction hash is returned. func (s *BlockChainAPI) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber, fullTx bool) (map[string]interface{}, error) { diff --git a/miner/miner_test.go b/miner/miner_test.go index 3dc39f175..7cd74cb70 100644 --- a/miner/miner_test.go +++ b/miner/miner_test.go @@ -59,11 +59,12 @@ func (m *mockBackend) TxPool() *txpool.TxPool { } type testBlockChain struct { - root common.Hash - config *params.ChainConfig - statedb *state.StateDB - gasLimit uint64 - chainHeadFeed *event.Feed + root common.Hash + config *params.ChainConfig + statedb *state.StateDB + gasLimit uint64 + chainHeadFeed *event.Feed + chainOptimisticHeadFeed *event.Feed } func (bc *testBlockChain) Config() *params.ChainConfig { @@ -94,7 +95,7 @@ func (bc *testBlockChain) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) } func (bc *testBlockChain) SubscribeChainOptimisticHeadEvent(ch chan<- core.ChainOptimisticHeadEvent) event.Subscription { - return bc.chainHeadFeed.Subscribe(ch) + return bc.chainOptimisticHeadFeed.Subscribe(ch) } func TestBuildPendingBlocks(t *testing.T) { @@ -161,10 +162,10 @@ func createMiner(t *testing.T) *Miner { t.Fatalf("can't create new chain %v", err) } statedb, _ := state.New(bc.Genesis().Root(), bc.StateCache(), nil) - blockchain := &testBlockChain{bc.Genesis().Root(), chainConfig, statedb, 10000000, new(event.Feed)} + blockchain := &testBlockChain{bc.Genesis().Root(), chainConfig, statedb, 10000000, new(event.Feed), new(event.Feed)} - pool := legacypool.New(testTxPoolConfig, blockchain) - txpool, _ := txpool.New(testTxPoolConfig.PriceLimit, blockchain, []txpool.SubPool{pool}) + pool := legacypool.New(testTxPoolConfig, blockchain, true) + txpool, _ := txpool.New(testTxPoolConfig.PriceLimit, blockchain, []txpool.SubPool{pool}, true) // Create Miner backend := NewMockBackend(bc, 
txpool) diff --git a/miner/payload_building_test.go b/miner/payload_building_test.go index 3ba7b6ccc..8e7f46e3a 100644 --- a/miner/payload_building_test.go +++ b/miner/payload_building_test.go @@ -124,8 +124,8 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine if err != nil { t.Fatalf("core.NewBlockChain failed: %v", err) } - pool := legacypool.New(testTxPoolConfig, chain) - txpool, _ := txpool.New(testTxPoolConfig.PriceLimit, chain, []txpool.SubPool{pool}) + pool := legacypool.New(testTxPoolConfig, chain, true) + txpool, _ := txpool.New(testTxPoolConfig.PriceLimit, chain, []txpool.SubPool{pool}, true) return &testWorkerBackend{ db: db, diff --git a/node/config.go b/node/config.go index 9f83540b2..11a54c1e6 100644 --- a/node/config.go +++ b/node/config.go @@ -217,6 +217,8 @@ type Config struct { EnablePersonal bool `toml:"-"` DBEngine string `toml:",omitempty"` + + EnableAuctioneer bool `toml:",omitempty"` } // IPCEndpoint resolves an IPC endpoint based on a configured value, taking into diff --git a/node/grpcstack.go b/node/grpcstack.go index 2f8d7b091..15000f9ab 100644 --- a/node/grpcstack.go +++ b/node/grpcstack.go @@ -1,7 +1,7 @@ package node import ( - optimisticGrpc "buf.build/gen/go/astria/execution-apis/grpc/go/astria/bundle/v1alpha1/bundlev1alpha1grpc" + optimisticGrpc "buf.build/gen/go/astria/execution-apis/grpc/go/astria/auction/v1alpha1/auctionv1alpha1grpc" "net" "sync" @@ -19,13 +19,15 @@ type GRPCServerHandler struct { execServer *grpc.Server executionServiceServerV1a2 *astriaGrpc.ExecutionServiceServer optimisticExecServ *optimisticGrpc.OptimisticExecutionServiceServer - streamBundleServ *optimisticGrpc.BundleServiceServer + auctionServiceServ *optimisticGrpc.AuctionServiceServer + + enableAuctioneer bool } // NewServer creates a new gRPC server. // It registers the execution service server. // It registers the gRPC server with the node so it can be stopped on shutdown. 
-func NewGRPCServerHandler(node *Node, execServ astriaGrpc.ExecutionServiceServer, optimisticExecServ optimisticGrpc.OptimisticExecutionServiceServer, streamBundleServ optimisticGrpc.BundleServiceServer, cfg *Config) error { +func NewGRPCServerHandler(node *Node, execServ astriaGrpc.ExecutionServiceServer, optimisticExecServ optimisticGrpc.OptimisticExecutionServiceServer, auctionServiceServ optimisticGrpc.AuctionServiceServer, cfg *Config) error { execServer := grpc.NewServer() log.Info("gRPC server enabled", "endpoint", cfg.GRPCEndpoint()) @@ -35,12 +37,15 @@ func NewGRPCServerHandler(node *Node, execServ astriaGrpc.ExecutionServiceServer execServer: execServer, executionServiceServerV1a2: &execServ, optimisticExecServ: &optimisticExecServ, - streamBundleServ: &streamBundleServ, + auctionServiceServ: &auctionServiceServ, + enableAuctioneer: cfg.EnableAuctioneer, } astriaGrpc.RegisterExecutionServiceServer(execServer, execServ) - optimisticGrpc.RegisterOptimisticExecutionServiceServer(execServer, optimisticExecServ) - optimisticGrpc.RegisterBundleServiceServer(execServer, streamBundleServ) + if cfg.EnableAuctioneer { + optimisticGrpc.RegisterOptimisticExecutionServiceServer(execServer, optimisticExecServ) + optimisticGrpc.RegisterAuctionServiceServer(execServer, auctionServiceServ) + } node.RegisterGRPCServer(serverHandler) return nil @@ -62,6 +67,7 @@ func (handler *GRPCServerHandler) Start() error { } go handler.execServer.Serve(tcpLis) + log.Info("gRPC server started", "endpoint", handler.endpoint) return nil } @@ -72,6 +78,7 @@ func (handler *GRPCServerHandler) Stop() error { defer handler.mu.Unlock() handler.execServer.GracefulStop() + log.Info("gRPC server stopped", "endpoint", handler.endpoint) return nil } diff --git a/node/node.go b/node/node.go index dc56b3361..0b19df5db 100644 --- a/node/node.go +++ b/node/node.go @@ -69,6 +69,8 @@ type Node struct { // grpc grpcServerHandler *GRPCServerHandler // Stores information about the grpc server + 
enableAuctioneer bool + databases map[*closeTrackingDB]struct{} // All open databases } @@ -159,6 +161,10 @@ func New(conf *Config) (*Node, error) { node.wsAuth = newHTTPServer(node.log, rpc.DefaultHTTPTimeouts) node.ipc = newIPCServer(node.log, conf.IPCEndpoint()) + if conf.EnableAuctioneer { + node.enableAuctioneer = true + } + return node, nil } @@ -756,6 +762,10 @@ func (n *Node) EventMux() *event.TypeMux { return n.eventmux } +func (n *Node) AuctioneerEnabled() bool { + return n.enableAuctioneer +} + // OpenDatabase opens an existing database with the given name (or creates one if no // previous can be found) from within the node's instance directory. If the node is // ephemeral, a memory database is returned. diff --git a/params/config.go b/params/config.go index d9b40b881..7d9ee2a79 100644 --- a/params/config.go +++ b/params/config.go @@ -388,6 +388,7 @@ type ChainConfig struct { AstriaBridgeAddressConfigs []AstriaBridgeAddressConfig `json:"astriaBridgeAddresses,omitempty"` AstriaFeeCollectors map[uint32]common.Address `json:"astriaFeeCollectors"` AstriaEIP1559Params *AstriaEIP1559Params `json:"astriaEIP1559Params,omitempty"` + AstriaAuctioneerAddresses map[uint32]string `json:"astriaAuctioneerAddresses,omitempty"` } func (c *ChainConfig) AstriaExtraData() []byte { diff --git a/rpc/types.go b/rpc/types.go index 2e53174b8..249efc51a 100644 --- a/rpc/types.go +++ b/rpc/types.go @@ -63,11 +63,12 @@ type jsonWriter interface { type BlockNumber int64 const ( - SafeBlockNumber = BlockNumber(-4) - FinalizedBlockNumber = BlockNumber(-3) - LatestBlockNumber = BlockNumber(-2) - PendingBlockNumber = BlockNumber(-1) - EarliestBlockNumber = BlockNumber(0) + OptimisticBlockNumber = BlockNumber(-5) + SafeBlockNumber = BlockNumber(-4) + FinalizedBlockNumber = BlockNumber(-3) + LatestBlockNumber = BlockNumber(-2) + PendingBlockNumber = BlockNumber(-1) + EarliestBlockNumber = BlockNumber(0) ) // UnmarshalJSON parses the given JSON fragment into a BlockNumber. 
It supports: @@ -98,6 +99,9 @@ func (bn *BlockNumber) UnmarshalJSON(data []byte) error { case "safe": *bn = SafeBlockNumber return nil + case "optimistic": + *bn = OptimisticBlockNumber + return nil } blckNum, err := hexutil.DecodeUint64(input) @@ -135,6 +139,8 @@ func (bn BlockNumber) String() string { return "finalized" case SafeBlockNumber: return "safe" + case OptimisticBlockNumber: + return "optimistic" default: if bn < 0 { return fmt.Sprintf("<invalid %d>", bn) @@ -188,6 +194,10 @@ func (bnh *BlockNumberOrHash) UnmarshalJSON(data []byte) error { bn := SafeBlockNumber bnh.BlockNumber = &bn return nil + case "optimistic": + bn := OptimisticBlockNumber + bnh.BlockNumber = &bn + return nil default: if len(input) == 66 { hash := common.Hash{}