From a800a1620ca2a29b6ada99f2c18aab37530f90e7 Mon Sep 17 00:00:00 2001 From: Bharath Date: Tue, 24 Sep 2024 16:42:29 +0530 Subject: [PATCH 01/79] use optimistic head event in txpool's maintanance loop --- cmd/devp2p/internal/ethtest/suite_test.go | 2 ++ core/txpool/legacypool/legacypool_test.go | 15 ++++++++++----- core/txpool/txpool.go | 8 ++++---- 3 files changed, 16 insertions(+), 9 deletions(-) diff --git a/cmd/devp2p/internal/ethtest/suite_test.go b/cmd/devp2p/internal/ethtest/suite_test.go index d70adda51..a02ff4acd 100644 --- a/cmd/devp2p/internal/ethtest/suite_test.go +++ b/cmd/devp2p/internal/ethtest/suite_test.go @@ -61,6 +61,7 @@ func TestEthSuite(t *testing.T) { if err != nil { t.Fatalf("could not create new test suite: %v", err) } + for _, test := range suite.EthTests() { t.Run(test.Name, func(t *testing.T) { if test.Slow && testing.Short() { @@ -149,5 +150,6 @@ func setupGeth(stack *node.Node, dir string) error { return fmt.Errorf("failed to register catalyst service: %v", err) } _, err = backend.BlockChain().InsertChain(chain.blocks[1:]) + backend.BlockChain().SetOptimistic(chain.blocks[len(chain.blocks)-1]) return err } diff --git a/core/txpool/legacypool/legacypool_test.go b/core/txpool/legacypool/legacypool_test.go index 9eaa1bc54..e35a42fed 100644 --- a/core/txpool/legacypool/legacypool_test.go +++ b/core/txpool/legacypool/legacypool_test.go @@ -63,14 +63,15 @@ func init() { } type testBlockChain struct { - config *params.ChainConfig - gasLimit atomic.Uint64 - statedb *state.StateDB - chainHeadFeed *event.Feed + config *params.ChainConfig + gasLimit atomic.Uint64 + statedb *state.StateDB + chainHeadFeed *event.Feed + chainOptimisticHeadFee *event.Feed } func newTestBlockChain(config *params.ChainConfig, gasLimit uint64, statedb *state.StateDB, chainHeadFeed *event.Feed) *testBlockChain { - bc := testBlockChain{config: config, statedb: statedb, chainHeadFeed: new(event.Feed)} + bc := testBlockChain{config: config, statedb: statedb, chainHeadFeed: new(event.Feed), chainOptimisticHeadFee: new(event.Feed)} bc.gasLimit.Store(gasLimit) return &bc } @@ -98,6 +99,10 @@ func (bc *testBlockChain) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) return bc.chainHeadFeed.Subscribe(ch) } +func (bc *testBlockChain) SubscribeChainOptimisticHeadEvent(ch chan<- core.ChainHeadEvent) event.Subscription { + return bc.chainOptimisticHeadFee.Subscribe(ch) +} + func transaction(nonce uint64, gaslimit uint64, key *ecdsa.PrivateKey) *types.Transaction { return pricedTransaction(nonce, gaslimit, big.NewInt(1), key) } diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go index f7eaeb78d..7767e205b 100644 --- a/core/txpool/txpool.go +++ b/core/txpool/txpool.go @@ -187,10 +187,10 @@ func (p *TxPool) loop(head *types.Header, chain BlockChain) { // Subscribe to chain head events to trigger subpool resets var ( - newHeadCh = make(chan core.ChainHeadEvent) - newHeadSub = chain.SubscribeChainHeadEvent(newHeadCh) + newOptimisticHeadCh = make(chan core.ChainOptimisticHeadEvent) + newOptimisticHeadSub = chain.SubscribeChainOptimisticHeadEvent(newOptimisticHeadCh) ) - defer newHeadSub.Unsubscribe() + defer newOptimisticHeadSub.Unsubscribe() // Track the previous and current head to feed to an idle reset var ( @@ -244,7 +244,7 @@ func (p *TxPool) loop(head *types.Header, chain BlockChain) { } // Wait for the next chain head event or a previous reset finish select { - case event := <-newHeadCh: + case event := <-newOptimisticHeadCh: // Chain moved forward, store the head for later consumption newHead 
= event.Block.Header() From 0fac052c40982422816c3409bff4bbd2ca0cb837 Mon Sep 17 00:00:00 2001 From: Bharath Date: Thu, 24 Oct 2024 14:15:58 +0530 Subject: [PATCH 02/79] fix tests --- grpc/execution/server_test.go | 33 ++++++++------------------------- 1 file changed, 8 insertions(+), 25 deletions(-) diff --git a/grpc/execution/server_test.go b/grpc/execution/server_test.go index 3567a521f..2dec2e861 100644 --- a/grpc/execution/server_test.go +++ b/grpc/execution/server_test.go @@ -658,15 +658,15 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitmentWithInval require.Nil(t, err, "GetCommitmentState failed") require.NotNil(t, commitmentState, "CommitmentState is nil") - ethservice.BlockChain().SetSafe(ethservice.BlockChain().CurrentBlock()) + previousBlockHeader := ethservice.BlockChain().CurrentBlock() + previousBlock := ethservice.BlockChain().GetBlockByHash(previousBlockHeader.Hash()) - // get previous block hash - previousBlock := ethservice.BlockChain().CurrentSafeBlock() - require.NotNil(t, previousBlock, "Previous block not found") + ethservice.BlockChain().SetOptimistic(previousBlock) + ethservice.BlockChain().SetSafe(previousBlockHeader) - gasLimit := ethservice.BlockChain().GasLimit() + require.NotNil(t, previousBlock, "Previous block not found") - stateDb, err := ethservice.BlockChain().StateAt(previousBlock.Root) + stateDb, err := ethservice.BlockChain().StateAt(previousBlock.Root()) require.Nil(t, err, "Failed to get state db") latestNonce := stateDb.GetNonce(testAddr) @@ -688,7 +688,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitmentWithInval } // add a tx with lesser gas than the base gas - unsignedTx := types.NewTransaction(latestNonce+uint64(5), testToAddress, big.NewInt(1), gasLimit, big.NewInt(params.InitialBaseFee*2), nil) + unsignedTx := types.NewTransaction(latestNonce+uint64(5), testToAddress, big.NewInt(1), ethservice.BlockChain().GasLimit(), big.NewInt(params.InitialBaseFee*2), nil) tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), testKey) require.Nil(t, err, "Failed to sign tx") txs = append(txs, tx) @@ -699,19 +699,10 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitmentWithInval Value: &sequencerblockv1.RollupData_SequencedData{SequencedData: marshalledTx}, }) - errors := ethservice.TxPool().Add(txs, true, false) - for _, err := range errors { - require.Nil(t, err, "Failed to add tx to pool") - } - - pending, queued := ethservice.TxPool().Stats() - require.Equal(t, 6, pending, "Pending txs should be 6") - require.Equal(t, 0, queued, "Queued txs should be 0") - executeBlockReq := &astriaPb.ExecuteBlockRequest{ PrevBlockHash: previousBlock.Hash().Bytes(), Timestamp: ×tamppb.Timestamp{ - Seconds: int64(previousBlock.Time + 2), + Seconds: int64(previousBlock.Time() + 2), }, Transactions: marshalledTxs, } @@ -759,14 +750,6 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitmentWithInval require.NotNil(t, block, "Soft Block not found") require.Equal(t, block.Transactions().Len(), 5, "Soft Block should have 5 txs") - // give the tx loop time to run - time.Sleep(1 * time.Millisecond) - - // after the tx loop is run, all pending txs should be removed - pending, queued = ethservice.TxPool().Stats() - require.Equal(t, 0, pending, "Pending txs should be 0") - require.Equal(t, 0, queued, "Queued txs should be 0") - // check if the soft and firm block are set correctly require.True(t, bytes.Equal(softBlock.Hash().Bytes(), 
updateCommitmentStateRes.Soft.Hash), "Soft Block Hashes do not match") require.True(t, bytes.Equal(softBlock.ParentHash.Bytes(), updateCommitmentStateRes.Soft.ParentBlockHash), "Soft Block Parent Hash do not match") From c62b0bda3d066befaeeb3db40994aa738731c59a Mon Sep 17 00:00:00 2001 From: Bharath Date: Thu, 26 Sep 2024 13:50:00 +0530 Subject: [PATCH 03/79] add logic to clear mempool --- core/txpool/legacypool/legacypool.go | 257 ++++++++++++++-------- core/txpool/legacypool/legacypool_test.go | 185 ++-------------- core/txpool/legacypool/list.go | 29 +++ core/txpool/legacypool/list_test.go | 1 - 4 files changed, 223 insertions(+), 249 deletions(-) diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index c691c1807..f0e3b1fae 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -1376,11 +1376,11 @@ func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest, delete(events, addr) } } - // Reset needs promote for all addresses - promoteAddrs = make([]common.Address, 0, len(pool.queue)) - for addr := range pool.queue { - promoteAddrs = append(promoteAddrs, addr) - } + /// bharath: don't promote any addresses since we are going to be clearing the mempool + //promoteAddrs = make([]common.Address, 0, len(pool.queue)) + //for addr := range pool.queue { + // promoteAddrs = append(promoteAddrs, addr) + //} } // Check for pending transactions for every account that sent new ones promoted := pool.promoteExecutables(promoteAddrs) @@ -1389,7 +1389,7 @@ func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest, // remove any transaction that has been included in the block or was invalidated // because of another transaction (e.g. higher gas price). if reset != nil { - pool.demoteUnexecutables() + pool.clearPendingAndQueued() if reset.newHead != nil { if pool.chainconfig.IsLondon(new(big.Int).Add(reset.newHead.Number, big.NewInt(1))) { pendingBaseFee := eip1559.CalcBaseFee(pool.chainconfig, reset.newHead) @@ -1435,82 +1435,84 @@ func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest, // of the transaction pool is valid with regard to the chain state. func (pool *LegacyPool) reset(oldHead, newHead *types.Header) { // If we're reorging an old state, reinject all dropped transactions - var reinject types.Transactions - - if oldHead != nil && oldHead.Hash() != newHead.ParentHash { - // If the reorg is too deep, avoid doing it (will happen during fast sync) - oldNum := oldHead.Number.Uint64() - newNum := newHead.Number.Uint64() - - if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 { - log.Debug("Skipping deep transaction reorg", "depth", depth) - } else { - // Reorg seems shallow enough to pull in all transactions into memory - var ( - rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64()) - add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64()) - ) - if rem == nil { - // This can happen if a setHead is performed, where we simply discard the old - // head from the chain. 
- // If that is the case, we don't have the lost transactions anymore, and - // there's nothing to add - if newNum >= oldNum { - // If we reorged to a same or higher number, then it's not a case of setHead - log.Warn("Transaction pool reset with missing old head", - "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) - return - } - // If the reorg ended up on a lower number, it's indicative of setHead being the cause - log.Debug("Skipping transaction reset caused by setHead", - "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) - // We still need to update the current state s.th. the lost transactions can be readded by the user - } else { - if add == nil { - // if the new head is nil, it means that something happened between - // the firing of newhead-event and _now_: most likely a - // reorg caused by sync-reversion or explicit sethead back to an - // earlier block. - log.Warn("Transaction pool reset with missing new head", "number", newHead.Number, "hash", newHead.Hash()) - return - } - var discarded, included types.Transactions - for rem.NumberU64() > add.NumberU64() { - discarded = append(discarded, rem.Transactions()...) - if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { - log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash()) - return - } - } - for add.NumberU64() > rem.NumberU64() { - included = append(included, add.Transactions()...) - if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil { - log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash()) - return - } - } - for rem.Hash() != add.Hash() { - discarded = append(discarded, rem.Transactions()...) - if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { - log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash()) - return - } - included = append(included, add.Transactions()...) - if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil { - log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash()) - return - } - } - lost := make([]*types.Transaction, 0, len(discarded)) - for _, tx := range types.TxDifference(discarded, included) { - if pool.Filter(tx) { - lost = append(lost, tx) - } - } - reinject = lost - } - } - } + //var reinject types.Transactions + // + //if oldHead != nil && oldHead.Hash() != newHead.ParentHash { + // // If the reorg is too deep, avoid doing it (will happen during fast sync) + // oldNum := oldHead.Number.Uint64() + // newNum := newHead.Number.Uint64() + // + // if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 { + // log.Debug("Skipping deep transaction reorg", "depth", depth) + // } else { + // // Reorg seems shallow enough to pull in all transactions into memory + // var ( + // rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64()) + // add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64()) + // ) + // if rem == nil { + // // This can happen if a setHead is performed, where we simply discard the old + // // head from the chain. 
+ // // If that is the case, we don't have the lost transactions anymore, and + // // there's nothing to add + // if newNum >= oldNum { + // // If we reorged to a same or higher number, then it's not a case of setHead + // log.Warn("Transaction pool reset with missing old head", + // "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) + // return + // } + // // If the reorg ended up on a lower number, it's indicative of setHead being the cause + // log.Debug("Skipping transaction reset caused by setHead", + // "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) + // // We still need to update the current state s.th. the lost transactions can be readded by the user + // } else { + // if add == nil { + // // if the new head is nil, it means that something happened between + // // the firing of newhead-event and _now_: most likely a + // // reorg caused by sync-reversion or explicit sethead back to an + // // earlier block. + // log.Warn("Transaction pool reset with missing new head", "number", newHead.Number, "hash", newHead.Hash()) + // return + // } + // var discarded, included types.Transactions + // for rem.NumberU64() > add.NumberU64() { + // discarded = append(discarded, rem.Transactions()...) + // if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { + // log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash()) + // return + // } + // } + // for add.NumberU64() > rem.NumberU64() { + // included = append(included, add.Transactions()...) + // if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil { + // log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash()) + // return + // } + // } + // for rem.Hash() != add.Hash() { + // discarded = append(discarded, rem.Transactions()...) + // if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { + // log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash()) + // return + // } + // included = append(included, add.Transactions()...) 
+ // if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil { + // log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash()) + // return + // } + // } + // lost := make([]*types.Transaction, 0, len(discarded)) + // for _, tx := range types.TxDifference(discarded, included) { + // if pool.Filter(tx) { + // lost = append(lost, tx) + // } + // } + // reinject = lost + // } + // } + //} + + // TODO - We only care about setting the head // Initialize the internal state to the current head if newHead == nil { newHead = pool.chain.CurrentBlock() // Special case during testing @@ -1524,10 +1526,11 @@ func (pool *LegacyPool) reset(oldHead, newHead *types.Header) { pool.currentState = statedb pool.pendingNonces = newNoncer(statedb) - // Inject any transactions discarded due to reorgs - log.Debug("Reinjecting stale transactions", "count", len(reinject)) - core.SenderCacher.Recover(pool.signer, reinject) - pool.addTxsLocked(reinject, false) + //// we don't care about these + //// Inject any transactions discarded due to reorgs + //log.Debug("Reinjecting stale transactions", "count", len(reinject)) + //core.SenderCacher.Recover(pool.signer, reinject) + //pool.addTxsLocked(reinject, false) } // promoteExecutables moves transactions that have become processable from the @@ -1732,6 +1735,87 @@ func (pool *LegacyPool) truncateQueue() { } } +func (pool *LegacyPool) clearPendingAndQueued() { + // Iterate over all accounts and demote any non-executable transactions + for addr, list := range pool.queue { + dropped, invalids := list.ClearList() + queuedGauge.Dec(int64(len(dropped) + len(invalids))) + + for _, tx := range dropped { + pool.all.Remove(tx.Hash()) + } + + if list.Empty() { + delete(pool.queue, addr) + } + } + + for addr, list := range pool.pending { + dropped, invalids := list.ClearList() + pendingGauge.Dec(int64(len(dropped) + len(invalids))) + + for _, tx := range dropped { + pool.all.Remove(tx.Hash()) + } + + if list.Empty() { + delete(pool.pending, addr) + delete(pool.beats, addr) + } + } + + //for addr, list := range pool.pending { + // nonce := pool.currentState.GetNonce(addr) + // + // // Drop all transactions that are deemed too old (low nonce) + // olds := list.Forward(nonce) + // for _, tx := range olds { + // hash := tx.Hash() + // pool.all.Remove(hash) + // log.Trace("Removed old pending transaction", "hash", hash) + // } + // // Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later + // drops, invalids := list.Filter(pool.currentState.GetBalance(addr), gasLimit) + // for _, tx := range drops { + // hash := tx.Hash() + // log.Trace("Removed unpayable pending transaction", "hash", hash) + // pool.all.Remove(hash) + // } + // pendingNofundsMeter.Mark(int64(len(drops))) + // + // for _, tx := range invalids { + // hash := tx.Hash() + // log.Trace("Demoting pending transaction", "hash", hash) + // + // // Internal shuffle shouldn't touch the lookup set. 
+ // pool.enqueueTx(hash, tx, false, false) + // } + // pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids))) + // if pool.locals.contains(addr) { + // localGauge.Dec(int64(len(olds) + len(drops) + len(invalids))) + // } + // // If there's a gap in front, alert (should never happen) and postpone all transactions + // if list.Len() > 0 && list.txs.Get(nonce) == nil { + // gapped := list.Cap(0) + // for _, tx := range gapped { + // hash := tx.Hash() + // log.Error("Demoting invalidated transaction", "hash", hash) + // + // // Internal shuffle shouldn't touch the lookup set. + // pool.enqueueTx(hash, tx, false, false) + // } + // pendingGauge.Dec(int64(len(gapped))) + // } + // // Delete the entire pending entry if it became empty. + // if list.Empty() { + // delete(pool.pending, addr) + // if _, ok := pool.queue[addr]; !ok { + // pool.reserve(addr, false) + // } + // } + //} +} + // demoteUnexecutables removes invalid and processed transactions from the pools // executable/pending queue and any subsequent transactions that become unexecutable // are moved back into the future queue. @@ -1742,6 +1826,7 @@ func (pool *LegacyPool) truncateQueue() { func (pool *LegacyPool) demoteUnexecutables() { // Iterate over all accounts and demote any non-executable transactions gasLimit := pool.currentHead.Load().GasLimit + for addr, list := range pool.pending { nonce := pool.currentState.GetNonce(addr) diff --git a/core/txpool/legacypool/legacypool_test.go b/core/txpool/legacypool/legacypool_test.go index e35a42fed..e5246745d 100644 --- a/core/txpool/legacypool/legacypool_test.go +++ b/core/txpool/legacypool/legacypool_test.go @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . 
- package legacypool import ( @@ -306,7 +305,8 @@ func TestStateChangeDuringReset(t *testing.T) { <-pool.requestReset(nil, nil) nonce = pool.Nonce(address) - if nonce != 2 { + // mempool is cleared + if nonce != 0 { t.Fatalf("Invalid nonce, want 2, got %d", nonce) } } @@ -701,58 +701,17 @@ func TestDropping(t *testing.T) { t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 6) } <-pool.requestReset(nil, nil) - if pool.pending[account].Len() != 3 { - t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 3) - } - if pool.queue[account].Len() != 3 { - t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 3) - } - if pool.all.Count() != 6 { - t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 6) - } - // Reduce the balance of the account, and check that invalidated transactions are dropped - testAddBalance(pool, account, big.NewInt(-650)) - <-pool.requestReset(nil, nil) - if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok { - t.Errorf("funded pending transaction missing: %v", tx0) - } - if _, ok := pool.pending[account].txs.items[tx1.Nonce()]; !ok { - t.Errorf("funded pending transaction missing: %v", tx0) - } - if _, ok := pool.pending[account].txs.items[tx2.Nonce()]; ok { - t.Errorf("out-of-fund pending transaction present: %v", tx1) - } - if _, ok := pool.queue[account].txs.items[tx10.Nonce()]; !ok { - t.Errorf("funded queued transaction missing: %v", tx10) - } - if _, ok := pool.queue[account].txs.items[tx11.Nonce()]; !ok { - t.Errorf("funded queued transaction missing: %v", tx10) - } - if _, ok := pool.queue[account].txs.items[tx12.Nonce()]; ok { - t.Errorf("out-of-fund queued transaction present: %v", tx11) - } - if pool.all.Count() != 4 { - t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 4) - } - // Reduce the block gas limit, check that invalidated transactions are dropped - pool.chain.(*testBlockChain).gasLimit.Store(100) - <-pool.requestReset(nil, nil) + pending, queued := pool.Stats() - if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok { - t.Errorf("funded pending transaction missing: %v", tx0) - } - if _, ok := pool.pending[account].txs.items[tx1.Nonce()]; ok { - t.Errorf("over-gased pending transaction present: %v", tx1) - } - if _, ok := pool.queue[account].txs.items[tx10.Nonce()]; !ok { - t.Errorf("funded queued transaction missing: %v", tx10) + if pending != 0 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0) } - if _, ok := pool.queue[account].txs.items[tx11.Nonce()]; ok { - t.Errorf("over-gased queued transaction present: %v", tx11) + if queued != 0 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) } - if pool.all.Count() != 2 { - t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 2) + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) } } @@ -809,64 +768,18 @@ func TestPostponing(t *testing.T) { t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs)) } <-pool.requestReset(nil, nil) - if pending := pool.pending[accs[0]].Len() + pool.pending[accs[1]].Len(); pending != len(txs) { - t.Errorf("pending transaction mismatch: have %d, want %d", pending, len(txs)) - } - if len(pool.queue) != 0 { - t.Errorf("queued accounts mismatch: have %d, want %d", len(pool.queue), 0) - } - if pool.all.Count() != len(txs) { - t.Errorf("total transaction mismatch: have %d, want 
%d", pool.all.Count(), len(txs)) - } - // Reduce the balance of the account, and check that transactions are reorganised - for _, addr := range accs { - testAddBalance(pool, addr, big.NewInt(-1)) - } - <-pool.requestReset(nil, nil) - // The first account's first transaction remains valid, check that subsequent - // ones are either filtered out, or queued up for later. - if _, ok := pool.pending[accs[0]].txs.items[txs[0].Nonce()]; !ok { - t.Errorf("tx %d: valid and funded transaction missing from pending pool: %v", 0, txs[0]) - } - if _, ok := pool.queue[accs[0]].txs.items[txs[0].Nonce()]; ok { - t.Errorf("tx %d: valid and funded transaction present in future queue: %v", 0, txs[0]) - } - for i, tx := range txs[1:100] { - if i%2 == 1 { - if _, ok := pool.pending[accs[0]].txs.items[tx.Nonce()]; ok { - t.Errorf("tx %d: valid but future transaction present in pending pool: %v", i+1, tx) - } - if _, ok := pool.queue[accs[0]].txs.items[tx.Nonce()]; !ok { - t.Errorf("tx %d: valid but future transaction missing from future queue: %v", i+1, tx) - } - } else { - if _, ok := pool.pending[accs[0]].txs.items[tx.Nonce()]; ok { - t.Errorf("tx %d: out-of-fund transaction present in pending pool: %v", i+1, tx) - } - if _, ok := pool.queue[accs[0]].txs.items[tx.Nonce()]; ok { - t.Errorf("tx %d: out-of-fund transaction present in future queue: %v", i+1, tx) - } - } - } - // The second account's first transaction got invalid, check that all transactions - // are either filtered out, or queued up for later. - if pool.pending[accs[1]] != nil { - t.Errorf("invalidated account still has pending transactions") + pending, queued := pool.Stats() + + if pending != 0 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0) } - for i, tx := range txs[100:] { - if i%2 == 1 { - if _, ok := pool.queue[accs[1]].txs.items[tx.Nonce()]; !ok { - t.Errorf("tx %d: valid but future transaction missing from future queue: %v", 100+i, tx) - } - } else { - if _, ok := pool.queue[accs[1]].txs.items[tx.Nonce()]; ok { - t.Errorf("tx %d: out-of-fund transaction present in future queue: %v", 100+i, tx) - } - } + if queued != 0 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) } - if pool.all.Count() != len(txs)/2 { - t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs)/2) + + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) } } @@ -1059,6 +972,7 @@ func testQueueGlobalLimiting(t *testing.T, nolocals bool) { func TestQueueTimeLimiting(t *testing.T) { testQueueTimeLimiting(t, false) } + func TestQueueTimeLimitingNoLocals(t *testing.T) { testQueueTimeLimiting(t, true) } @@ -1156,55 +1070,6 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) { if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } - - // Queue gapped transactions - if err := pool.addLocal(pricedTransaction(4, 100000, big.NewInt(1), local)); err != nil { - t.Fatalf("failed to add remote transaction: %v", err) - } - if err := pool.addRemoteSync(pricedTransaction(4, 100000, big.NewInt(1), remote)); err != nil { - t.Fatalf("failed to add remote transaction: %v", err) - } - time.Sleep(5 * evictionInterval) // A half lifetime pass - - // Queue executable transactions, the life cycle should be restarted. 
- if err := pool.addLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil { - t.Fatalf("failed to add remote transaction: %v", err) - } - if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(1), remote)); err != nil { - t.Fatalf("failed to add remote transaction: %v", err) - } - time.Sleep(6 * evictionInterval) - - // All gapped transactions shouldn't be kicked out - pending, queued = pool.Stats() - if pending != 2 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) - } - if queued != 2 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) - } - if err := validatePoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } - - // The whole life time pass after last promotion, kick out stale transactions - time.Sleep(2 * config.Lifetime) - pending, queued = pool.Stats() - if pending != 2 { - t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) - } - if nolocals { - if queued != 0 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) - } - } else { - if queued != 1 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) - } - } - if err := validatePoolInternals(pool); err != nil { - t.Fatalf("pool internal state corrupted: %v", err) - } } // Tests that even if the transaction count belonging to a single account goes @@ -2410,6 +2275,7 @@ func TestReplacementDynamicFee(t *testing.T) { // Tests that local transactions are journaled to disk, but remote transactions // get discarded between restarts. +// TODO - fix this func TestJournaling(t *testing.T) { testJournaling(t, false) } func TestJournalingNoLocals(t *testing.T) { testJournaling(t, true) } @@ -2505,18 +2371,13 @@ func testJournaling(t *testing.T, nolocals bool) { pool = New(config, blockchain) pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) + // tx mempool is cleared out completely after a reset pending, queued = pool.Stats() if pending != 0 { t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0) } - if nolocals { - if queued != 0 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) - } - } else { - if queued != 1 { - t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) - } + if queued != 0 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) } if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) diff --git a/core/txpool/legacypool/list.go b/core/txpool/legacypool/list.go index b749db44d..6b1a48b15 100644 --- a/core/txpool/legacypool/list.go +++ b/core/txpool/legacypool/list.go @@ -396,6 +396,35 @@ func (l *list) Filter(costLimit *uint256.Int, gasLimit uint64) (types.Transactio return removed, invalids } +func (l *list) ClearList() (types.Transactions, types.Transactions) { + // Filter out all the transactions + removed := l.txs.Filter(func(tx *types.Transaction) bool { + return true + }) + + if len(removed) == 0 { + return nil, nil + } + + // TODO: we might not need the code below + var invalids types.Transactions + // If the list was strict, filter anything above the lowest nonce + if l.strict { + lowest := uint64(math.MaxUint64) + for _, tx := range removed { + if nonce := tx.Nonce(); lowest > nonce { + lowest = nonce + } + } + invalids = l.txs.filter(func(tx *types.Transaction) bool { return tx.Nonce() > lowest }) + } + // Reset total cost + l.subTotalCost(removed) + l.subTotalCost(invalids) + 
l.txs.reheap() + return removed, invalids +} + // Cap places a hard limit on the number of items, returning all transactions // exceeding that limit. func (l *list) Cap(threshold int) types.Transactions { diff --git a/core/txpool/legacypool/list_test.go b/core/txpool/legacypool/list_test.go index 8587c66f7..b46574867 100644 --- a/core/txpool/legacypool/list_test.go +++ b/core/txpool/legacypool/list_test.go @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . - package legacypool import ( From e09cfde81ec05a007ab8b538052e1875670082a5 Mon Sep 17 00:00:00 2001 From: Bharath Date: Thu, 26 Sep 2024 14:00:07 +0530 Subject: [PATCH 04/79] remove invalids --- core/txpool/legacypool/legacypool.go | 57 +++------------------------- 1 file changed, 6 insertions(+), 51 deletions(-) diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index f0e3b1fae..e12e77786 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -1744,6 +1744,9 @@ func (pool *LegacyPool) clearPendingAndQueued() { for _, tx := range dropped { pool.all.Remove(tx.Hash()) } + for _, tx := range invalids { + pool.all.Remove(tx.Hash()) + } if list.Empty() { delete(pool.queue, addr) @@ -1757,63 +1760,15 @@ func (pool *LegacyPool) clearPendingAndQueued() { for _, tx := range dropped { pool.all.Remove(tx.Hash()) } + for _, tx := range invalids { + pool.all.Remove(tx.Hash()) + } if list.Empty() { delete(pool.pending, addr) delete(pool.beats, addr) } } - - //for addr, list := range pool.pending { - // nonce := pool.currentState.GetNonce(addr) - // - // // Drop all transactions that are deemed too old (low nonce) - // olds := list.Forward(nonce) - // for _, tx := range olds { - // hash := tx.Hash() - // pool.all.Remove(hash) - // log.Trace("Removed old pending transaction", "hash", hash) - // } - // // Drop all transactions that are too costly (low balance or out of gas), and queue any invalids back for later - // drops, invalids := list.Filter(pool.currentState.GetBalance(addr), gasLimit) - // for _, tx := range drops { - // hash := tx.Hash() - // log.Trace("Removed unpayable pending transaction", "hash", hash) - // pool.all.Remove(hash) - // } - // pendingNofundsMeter.Mark(int64(len(drops))) - // - // for _, tx := range invalids { - // hash := tx.Hash() - // log.Trace("Demoting pending transaction", "hash", hash) - // - // // Internal shuffle shouldn't touch the lookup set. - // pool.enqueueTx(hash, tx, false, false) - // } - // pendingGauge.Dec(int64(len(olds) + len(drops) + len(invalids))) - // if pool.locals.contains(addr) { - // localGauge.Dec(int64(len(olds) + len(drops) + len(invalids))) - // } - // // If there's a gap in front, alert (should never happen) and postpone all transactions - // if list.Len() > 0 && list.txs.Get(nonce) == nil { - // gapped := list.Cap(0) - // for _, tx := range gapped { - // hash := tx.Hash() - // log.Error("Demoting invalidated transaction", "hash", hash) - // - // // Internal shuffle shouldn't touch the lookup set. - // pool.enqueueTx(hash, tx, false, false) - // } - // pendingGauge.Dec(int64(len(gapped))) - // } - // // Delete the entire pending entry if it became empty. 
- // if list.Empty() { - // delete(pool.pending, addr) - // if _, ok := pool.queue[addr]; !ok { - // pool.reserve(addr, false) - // } - // } - //} } // demoteUnexecutables removes invalid and processed transactions from the pools From 878c5c0235f57179d2a9c72917f25577c902757f Mon Sep 17 00:00:00 2001 From: Bharath Date: Thu, 26 Sep 2024 14:09:21 +0530 Subject: [PATCH 05/79] add comments --- core/txpool/legacypool/legacypool.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index e12e77786..55be566a3 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -1735,6 +1735,8 @@ func (pool *LegacyPool) truncateQueue() { } } +// clearPendingAndQueued removes invalid and processed transactions from the pools +// it assumes that the pool lock is being held func (pool *LegacyPool) clearPendingAndQueued() { // Iterate over all accounts and demote any non-executable transactions for addr, list := range pool.queue { From f2fdd0fbc8c46d7b5d24732858a8bca1032a176c Mon Sep 17 00:00:00 2001 From: Bharath Date: Wed, 16 Oct 2024 23:16:21 +0530 Subject: [PATCH 06/79] remove mempool conditionals --- core/txpool/legacypool/legacypool.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index 55be566a3..b1ae2e62a 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -1376,11 +1376,10 @@ func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest, delete(events, addr) } } - /// bharath: don't promote any addresses since we are going to be clearing the mempool - //promoteAddrs = make([]common.Address, 0, len(pool.queue)) - //for addr := range pool.queue { - // promoteAddrs = append(promoteAddrs, addr) - //} + promoteAddrs = make([]common.Address, 0, len(pool.queue)) + for addr := range pool.queue { + promoteAddrs = append(promoteAddrs, addr) + } } // Check for pending transactions for every account that sent new ones promoted := pool.promoteExecutables(promoteAddrs) From 6a8e4462c1c1cb256624e8b841ec046255f5c257 Mon Sep 17 00:00:00 2001 From: Bharath Date: Thu, 24 Oct 2024 11:57:38 +0530 Subject: [PATCH 07/79] split out reset logic and head only reset logc --- core/txpool/legacypool/legacypool.go | 182 +++++++++++++++------------ 1 file changed, 99 insertions(+), 83 deletions(-) diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index b1ae2e62a..0bf2f21db 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -1434,84 +1434,83 @@ func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest, // of the transaction pool is valid with regard to the chain state. 
func (pool *LegacyPool) reset(oldHead, newHead *types.Header) { // If we're reorging an old state, reinject all dropped transactions - //var reinject types.Transactions - // - //if oldHead != nil && oldHead.Hash() != newHead.ParentHash { - // // If the reorg is too deep, avoid doing it (will happen during fast sync) - // oldNum := oldHead.Number.Uint64() - // newNum := newHead.Number.Uint64() - // - // if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 { - // log.Debug("Skipping deep transaction reorg", "depth", depth) - // } else { - // // Reorg seems shallow enough to pull in all transactions into memory - // var ( - // rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64()) - // add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64()) - // ) - // if rem == nil { - // // This can happen if a setHead is performed, where we simply discard the old - // // head from the chain. - // // If that is the case, we don't have the lost transactions anymore, and - // // there's nothing to add - // if newNum >= oldNum { - // // If we reorged to a same or higher number, then it's not a case of setHead - // log.Warn("Transaction pool reset with missing old head", - // "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) - // return - // } - // // If the reorg ended up on a lower number, it's indicative of setHead being the cause - // log.Debug("Skipping transaction reset caused by setHead", - // "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) - // // We still need to update the current state s.th. the lost transactions can be readded by the user - // } else { - // if add == nil { - // // if the new head is nil, it means that something happened between - // // the firing of newhead-event and _now_: most likely a - // // reorg caused by sync-reversion or explicit sethead back to an - // // earlier block. - // log.Warn("Transaction pool reset with missing new head", "number", newHead.Number, "hash", newHead.Hash()) - // return - // } - // var discarded, included types.Transactions - // for rem.NumberU64() > add.NumberU64() { - // discarded = append(discarded, rem.Transactions()...) - // if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { - // log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash()) - // return - // } - // } - // for add.NumberU64() > rem.NumberU64() { - // included = append(included, add.Transactions()...) - // if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil { - // log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash()) - // return - // } - // } - // for rem.Hash() != add.Hash() { - // discarded = append(discarded, rem.Transactions()...) - // if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { - // log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash()) - // return - // } - // included = append(included, add.Transactions()...) 
- // if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil { - // log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash()) - // return - // } - // } - // lost := make([]*types.Transaction, 0, len(discarded)) - // for _, tx := range types.TxDifference(discarded, included) { - // if pool.Filter(tx) { - // lost = append(lost, tx) - // } - // } - // reinject = lost - // } - // } - //} - - // TODO - We only care about setting the head + var reinject types.Transactions + + if oldHead != nil && oldHead.Hash() != newHead.ParentHash { + // If the reorg is too deep, avoid doing it (will happen during fast sync) + oldNum := oldHead.Number.Uint64() + newNum := newHead.Number.Uint64() + + if depth := uint64(math.Abs(float64(oldNum) - float64(newNum))); depth > 64 { + log.Debug("Skipping deep transaction reorg", "depth", depth) + } else { + // Reorg seems shallow enough to pull in all transactions into memory + var ( + rem = pool.chain.GetBlock(oldHead.Hash(), oldHead.Number.Uint64()) + add = pool.chain.GetBlock(newHead.Hash(), newHead.Number.Uint64()) + ) + if rem == nil { + // This can happen if a setHead is performed, where we simply discard the old + // head from the chain. + // If that is the case, we don't have the lost transactions anymore, and + // there's nothing to add + if newNum >= oldNum { + // If we reorged to a same or higher number, then it's not a case of setHead + log.Warn("Transaction pool reset with missing old head", + "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) + return + } + // If the reorg ended up on a lower number, it's indicative of setHead being the cause + log.Debug("Skipping transaction reset caused by setHead", + "old", oldHead.Hash(), "oldnum", oldNum, "new", newHead.Hash(), "newnum", newNum) + // We still need to update the current state s.th. the lost transactions can be readded by the user + } else { + if add == nil { + // if the new head is nil, it means that something happened between + // the firing of newhead-event and _now_: most likely a + // reorg caused by sync-reversion or explicit sethead back to an + // earlier block. + log.Warn("Transaction pool reset with missing new head", "number", newHead.Number, "hash", newHead.Hash()) + return + } + var discarded, included types.Transactions + for rem.NumberU64() > add.NumberU64() { + discarded = append(discarded, rem.Transactions()...) + if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { + log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash()) + return + } + } + for add.NumberU64() > rem.NumberU64() { + included = append(included, add.Transactions()...) + if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil { + log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash()) + return + } + } + for rem.Hash() != add.Hash() { + discarded = append(discarded, rem.Transactions()...) + if rem = pool.chain.GetBlock(rem.ParentHash(), rem.NumberU64()-1); rem == nil { + log.Error("Unrooted old chain seen by tx pool", "block", oldHead.Number, "hash", oldHead.Hash()) + return + } + included = append(included, add.Transactions()...) 
+ if add = pool.chain.GetBlock(add.ParentHash(), add.NumberU64()-1); add == nil { + log.Error("Unrooted new chain seen by tx pool", "block", newHead.Number, "hash", newHead.Hash()) + return + } + } + lost := make([]*types.Transaction, 0, len(discarded)) + for _, tx := range types.TxDifference(discarded, included) { + if pool.Filter(tx) { + lost = append(lost, tx) + } + } + reinject = lost + } + } + } + // Initialize the internal state to the current head if newHead == nil { newHead = pool.chain.CurrentBlock() // Special case during testing @@ -1525,11 +1524,28 @@ func (pool *LegacyPool) reset(oldHead, newHead *types.Header) { pool.currentState = statedb pool.pendingNonces = newNoncer(statedb) - //// we don't care about these - //// Inject any transactions discarded due to reorgs - //log.Debug("Reinjecting stale transactions", "count", len(reinject)) - //core.SenderCacher.Recover(pool.signer, reinject) - //pool.addTxsLocked(reinject, false) + // we don't care about these + // Inject any transactions discarded due to reorgs + log.Debug("Reinjecting stale transactions", "count", len(reinject)) + core.SenderCacher.Recover(pool.signer, reinject) + pool.addTxsLocked(reinject, false) +} + +// reset retrieves the current state of the blockchain and ensures the content +// of the transaction pool is valid with regard to the chain state. +func (pool *LegacyPool) resetHeadOnly(oldHead, newHead *types.Header) { + // Initialize the internal state to the current head + if newHead == nil { + newHead = pool.chain.CurrentBlock() // Special case during testing + } + statedb, err := pool.chain.StateAt(newHead.Root) + if err != nil { + log.Error("Failed to reset txpool state", "err", err) + return + } + pool.currentHead.Store(newHead) + pool.currentState = statedb + pool.pendingNonces = newNoncer(statedb) } // promoteExecutables moves transactions that have become processable from the From 01be51202731903b11b3875419b66f02485f8810 Mon Sep 17 00:00:00 2001 From: Bharath Date: Sun, 10 Nov 2024 13:59:05 +0530 Subject: [PATCH 08/79] ensure that unreserved addresses are removed --- core/txpool/legacypool/legacypool.go | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index 0bf2f21db..c67d8801d 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -1754,9 +1754,9 @@ func (pool *LegacyPool) truncateQueue() { // it assumes that the pool lock is being held func (pool *LegacyPool) clearPendingAndQueued() { // Iterate over all accounts and demote any non-executable transactions - for addr, list := range pool.queue { + for addr, list := range pool.pending { dropped, invalids := list.ClearList() - queuedGauge.Dec(int64(len(dropped) + len(invalids))) + pendingGauge.Dec(int64(len(dropped) + len(invalids))) for _, tx := range dropped { pool.all.Remove(tx.Hash()) @@ -1766,13 +1766,14 @@ func (pool *LegacyPool) clearPendingAndQueued() { } if list.Empty() { - delete(pool.queue, addr) + delete(pool.pending, addr) + delete(pool.beats, addr) } } - for addr, list := range pool.pending { + for addr, list := range pool.queue { dropped, invalids := list.ClearList() - pendingGauge.Dec(int64(len(dropped) + len(invalids))) + queuedGauge.Dec(int64(len(dropped) + len(invalids))) for _, tx := range dropped { pool.all.Remove(tx.Hash()) @@ -1782,10 +1783,13 @@ func (pool *LegacyPool) clearPendingAndQueued() { } if list.Empty() { - delete(pool.pending, addr) - delete(pool.beats, addr) + if _, 
ok := pool.queue[addr]; !ok { + pool.reserve(addr, false) + } + delete(pool.queue, addr) } } + } // demoteUnexecutables removes invalid and processed transactions from the pools From 2051f8a57b730b742724358c6d87f01f9ae33e2b Mon Sep 17 00:00:00 2001 From: Bharath Date: Sun, 10 Nov 2024 14:03:58 +0530 Subject: [PATCH 09/79] add event for mempool clearance --- core/events.go | 6 ++ core/txpool/legacypool/legacypool.go | 32 ++++++--- core/txpool/legacypool/legacypool_test.go | 80 +++++++++++++++++++++++ grpc/execution/server_test.go | 2 + 4 files changed, 110 insertions(+), 10 deletions(-) diff --git a/core/events.go b/core/events.go index 4f4c01e3b..4afcadb1b 100644 --- a/core/events.go +++ b/core/events.go @@ -24,6 +24,12 @@ import ( // NewTxsEvent is posted when a batch of transactions enter the transaction pool. type NewTxsEvent struct{ Txs []*types.Transaction } +// NewMempoolClearedEvent is posted when the mempool is cleared after a head reset for trusted auctioneer +type NewMempoolCleared struct { + // the new head to which the mempool state was reset to before clearing the mempool + NewHead *types.Header +} + // NewMinedBlockEvent is posted when a block has been imported. type NewMinedBlockEvent struct{ Block *types.Block } diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index c67d8801d..b8c66617f 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -205,13 +205,14 @@ func (config *Config) sanitize() Config { // current state) and future transactions. Transactions move between those // two states over time as they are received and processed. type LegacyPool struct { - config Config - chainconfig *params.ChainConfig - chain BlockChain - gasTip atomic.Pointer[uint256.Int] - txFeed event.Feed - signer types.Signer - mu sync.RWMutex + config Config + chainconfig *params.ChainConfig + chain BlockChain + gasTip atomic.Pointer[uint256.Int] + txFeed event.Feed + mempoolClearFeed event.Feed + signer types.Signer + mu sync.RWMutex astria *astriaOrdered @@ -521,6 +522,12 @@ func (pool *LegacyPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs return pool.txFeed.Subscribe(ch) } +// SubscribeTransactions registers a subscription for the event which is triggered +// when the mempool is cleared after a reset +func (pool *LegacyPool) SubscribeMempoolClearance(ch chan<- core.NewMempoolCleared) event.Subscription { + return pool.mempoolClearFeed.Subscribe(ch) +} + // SetGasTip updates the minimum gas tip required by the transaction pool for a // new transaction, and drops all transactions below this threshold. func (pool *LegacyPool) SetGasTip(tip *big.Int) { @@ -1388,7 +1395,7 @@ func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest, // remove any transaction that has been included in the block or was invalidated // because of another transaction (e.g. higher gas price). 
if reset != nil { - pool.clearPendingAndQueued() + pool.clearPendingAndQueued(reset.newHead) if reset.newHead != nil { if pool.chainconfig.IsLondon(new(big.Int).Add(reset.newHead.Number, big.NewInt(1))) { pendingBaseFee := eip1559.CalcBaseFee(pool.chainconfig, reset.newHead) @@ -1413,6 +1420,12 @@ func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest, pool.changesSinceReorg = 0 // Reset change counter pool.mu.Unlock() + if reset != nil { + if reset.newHead != nil { + pool.mempoolClearFeed.Send(core.NewMempoolCleared{NewHead: reset.newHead}) + } + } + // Notify subsystems for newly added transactions for _, tx := range promoted { addr, _ := types.Sender(pool.signer, tx) @@ -1752,7 +1765,7 @@ func (pool *LegacyPool) truncateQueue() { // clearPendingAndQueued removes invalid and processed transactions from the pools // it assumes that the pool lock is being held -func (pool *LegacyPool) clearPendingAndQueued() { +func (pool *LegacyPool) clearPendingAndQueued(newHead *types.Header) { // Iterate over all accounts and demote any non-executable transactions for addr, list := range pool.pending { dropped, invalids := list.ClearList() @@ -1789,7 +1802,6 @@ func (pool *LegacyPool) clearPendingAndQueued() { delete(pool.queue, addr) } } - } // demoteUnexecutables removes invalid and processed transactions from the pools diff --git a/core/txpool/legacypool/legacypool_test.go b/core/txpool/legacypool/legacypool_test.go index e5246745d..aa9fe6c92 100644 --- a/core/txpool/legacypool/legacypool_test.go +++ b/core/txpool/legacypool/legacypool_test.go @@ -300,6 +300,10 @@ func TestStateChangeDuringReset(t *testing.T) { t.Fatalf("Invalid nonce, want 2, got %d", nonce) } + mempoolClearedCh := make(chan core.NewMempoolCleared, 1) + mempoolClearedSub := pool.SubscribeMempoolClearance(mempoolClearedCh) + defer mempoolClearedSub.Unsubscribe() + // trigger state change in the background trigger = true <-pool.requestReset(nil, nil) @@ -309,6 +313,17 @@ func TestStateChangeDuringReset(t *testing.T) { if nonce != 0 { t.Fatalf("Invalid nonce, want 2, got %d", nonce) } + + select { + case mempoolClear := <-mempoolClearedCh: + if mempoolClear.NewHead != nil { + t.Fatalf("Expected mempool cleared head to be nil: %v", mempoolClear.NewHead) + } + case <-time.After(1 * time.Second): + t.Fatalf("Mempool cleared event not received") + case err := <-mempoolClearedSub.Err(): + t.Fatalf("Mempool cleared subscription error: %v", err) + } } func testAddBalance(pool *LegacyPool, addr common.Address, amount *big.Int) { @@ -700,6 +715,11 @@ func TestDropping(t *testing.T) { if pool.all.Count() != 6 { t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 6) } + + mempoolClearedCh := make(chan core.NewMempoolCleared, 1) + mempoolClearedSub := pool.SubscribeMempoolClearance(mempoolClearedCh) + defer mempoolClearedSub.Unsubscribe() + <-pool.requestReset(nil, nil) pending, queued := pool.Stats() @@ -713,6 +733,17 @@ func TestDropping(t *testing.T) { if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } + + select { + case mempoolClear := <-mempoolClearedCh: + if mempoolClear.NewHead != nil { + t.Fatalf("Expected mempool cleared head to be nil: %v", mempoolClear.NewHead) + } + case <-time.After(1 * time.Second): + t.Fatalf("Mempool cleared event not received") + case err := <-mempoolClearedSub.Err(): + t.Fatalf("Mempool cleared subscription error: %v", err) + } } // Tests that if a transaction is dropped from the current pending pool (e.g. 
out @@ -767,6 +798,11 @@ func TestPostponing(t *testing.T) { if pool.all.Count() != len(txs) { t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs)) } + + mempoolClearedCh := make(chan core.NewMempoolCleared, 1) + mempoolClearedSub := pool.SubscribeMempoolClearance(mempoolClearedCh) + defer mempoolClearedSub.Unsubscribe() + <-pool.requestReset(nil, nil) pending, queued := pool.Stats() @@ -781,6 +817,17 @@ func TestPostponing(t *testing.T) { if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } + + select { + case mempoolClear := <-mempoolClearedCh: + if mempoolClear.NewHead != nil { + t.Fatalf("Expected mempool cleared head to be nil: %v", mempoolClear.NewHead) + } + case <-time.After(1 * time.Second): + t.Fatalf("Mempool cleared event not received") + case err := <-mempoolClearedSub.Err(): + t.Fatalf("Mempool cleared subscription error: %v", err) + } } // Tests that if the transaction pool has both executable and non-executable @@ -1057,6 +1104,11 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) { // remove current transactions and increase nonce to prepare for a reset and cleanup statedb.SetNonce(crypto.PubkeyToAddress(remote.PublicKey), 2) statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 2) + + mempoolClearedCh := make(chan core.NewMempoolCleared, 1) + mempoolClearedSub := pool.SubscribeMempoolClearance(mempoolClearedCh) + defer mempoolClearedSub.Unsubscribe() + <-pool.requestReset(nil, nil) // make sure queue, pending are cleared @@ -1070,6 +1122,17 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) { if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } + + select { + case mempoolClear := <-mempoolClearedCh: + if mempoolClear.NewHead != nil { + t.Fatalf("Expected mempool cleared head to be nil: %v", mempoolClear.NewHead) + } + case <-time.After(1 * time.Second): + t.Fatalf("Mempool cleared event not received") + case err := <-mempoolClearedSub.Err(): + t.Fatalf("Mempool cleared subscription error: %v", err) + } } // Tests that even if the transaction count belonging to a single account goes @@ -2362,6 +2425,11 @@ func testJournaling(t *testing.T, nolocals bool) { } // Bump the nonce temporarily and ensure the newly invalidated transaction is removed statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 2) + + mempoolClearedCh := make(chan core.NewMempoolCleared, 1) + mempoolClearedSub := pool.SubscribeMempoolClearance(mempoolClearedCh) + defer mempoolClearedSub.Unsubscribe() + <-pool.requestReset(nil, nil) time.Sleep(2 * config.Rejournal) pool.Close() @@ -2382,6 +2450,18 @@ func testJournaling(t *testing.T, nolocals bool) { if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } + + select { + case mempoolClear := <-mempoolClearedCh: + if mempoolClear.NewHead != nil { + t.Fatalf("mempool clear event should not have a new head") + } + case <-time.After(1 * time.Second): + t.Fatalf("mempool clear event not received") + case err := <-mempoolClearedSub.Err(): + t.Fatalf("mempool clear event subscription error: %v", err) + } + pool.Close() } diff --git a/grpc/execution/server_test.go b/grpc/execution/server_test.go index 2dec2e861..2ce2738e0 100644 --- a/grpc/execution/server_test.go +++ b/grpc/execution/server_test.go @@ -358,6 +358,8 @@ func TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlock(t *testing.T) { require.Equal(t, block.NumberU64(), blockEvent.Block.NumberU64(), 
"Optimistic head event block number is not correct") case <-time.After(2 * time.Second): require.FailNow(t, "Optimistic head event not received") + case err := <-optimsticHeadSub.Err(): + require.Nil(t, err, "Optimistic head event subscription failed") } } }) From 864138e9c8ac64c3acf017f3921e56682714eedd Mon Sep 17 00:00:00 2001 From: Bharath Date: Fri, 27 Sep 2024 15:12:44 +0530 Subject: [PATCH 10/79] update subscription interfaces --- core/txpool/blobpool/blobpool.go | 4 ++++ core/txpool/subpool.go | 3 +++ core/txpool/txpool.go | 9 +++++++++ 3 files changed, 16 insertions(+) diff --git a/core/txpool/blobpool/blobpool.go b/core/txpool/blobpool/blobpool.go index 1305b6a07..68cd0df1d 100644 --- a/core/txpool/blobpool/blobpool.go +++ b/core/txpool/blobpool/blobpool.go @@ -1602,6 +1602,10 @@ func (p *BlobPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool } } +func (p *BlobPool) SubscribeMempoolClearance(ch chan<- core.NewMempoolCleared) event.Subscription { + panic("not implemented") +} + // Nonce returns the next nonce of an account, with all transactions executable // by the pool already applied on top. func (p *BlobPool) Nonce(addr common.Address) uint64 { diff --git a/core/txpool/subpool.go b/core/txpool/subpool.go index be59ec861..0f640b493 100644 --- a/core/txpool/subpool.go +++ b/core/txpool/subpool.go @@ -140,6 +140,9 @@ type SubPool interface { // or also for reorged out ones. SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) event.Subscription + // SubscribeMempoolClearance subscribes to new mempool clearing events. + SubscribeMempoolClearance(ch chan<- core.NewMempoolCleared) event.Subscription + // Nonce returns the next nonce of an account, with all transactions executable // by the pool already applied on top. Nonce(addr common.Address) uint64 diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go index 7767e205b..eec53a2bc 100644 --- a/core/txpool/txpool.go +++ b/core/txpool/txpool.go @@ -431,6 +431,15 @@ func (p *TxPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) return p.subs.Track(event.JoinSubscriptions(subs...)) } +// SubscribeMempoolClearance registers a subscription for new mempool clearance events +func (p *TxPool) SubscribeMempoolClearance(ch chan<- core.NewMempoolCleared) event.Subscription { + subs := make([]event.Subscription, len(p.subpools)) + for i, subpool := range p.subpools { + subs[i] = subpool.SubscribeMempoolClearance(ch) + } + return p.subs.Track(event.JoinSubscriptions(subs...)) +} + // Nonce returns the next nonce of an account, with all transactions executable // by the pool already applied on top. 
func (p *TxPool) Nonce(addr common.Address) uint64 { From f7bb86f431d9f24540f657465546c097fa31826b Mon Sep 17 00:00:00 2001 From: Bharath Date: Mon, 4 Nov 2024 12:15:55 +0530 Subject: [PATCH 11/79] fix potential panic while subscribing to mempool clearance --- core/txpool/blobpool/blobpool.go | 2 +- core/txpool/txpool.go | 9 ++++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/core/txpool/blobpool/blobpool.go b/core/txpool/blobpool/blobpool.go index 68cd0df1d..30b507f08 100644 --- a/core/txpool/blobpool/blobpool.go +++ b/core/txpool/blobpool/blobpool.go @@ -1603,7 +1603,7 @@ func (p *BlobPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool } func (p *BlobPool) SubscribeMempoolClearance(ch chan<- core.NewMempoolCleared) event.Subscription { - panic("not implemented") + return nil } // Nonce returns the next nonce of an account, with all transactions executable diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go index eec53a2bc..883af5635 100644 --- a/core/txpool/txpool.go +++ b/core/txpool/txpool.go @@ -433,9 +433,12 @@ func (p *TxPool) SubscribeTransactions(ch chan<- core.NewTxsEvent, reorgs bool) // SubscribeMempoolClearance registers a subscription for new mempool clearance events func (p *TxPool) SubscribeMempoolClearance(ch chan<- core.NewMempoolCleared) event.Subscription { - subs := make([]event.Subscription, len(p.subpools)) - for i, subpool := range p.subpools { - subs[i] = subpool.SubscribeMempoolClearance(ch) + subs := []event.Subscription{} + for _, subpool := range p.subpools { + sub := subpool.SubscribeMempoolClearance(ch) + if sub != nil { + subs = append(subs, sub) + } } return p.subs.Track(event.JoinSubscriptions(subs...)) } From 9a352f1a710547ced3ea42a059082cbe76b578db Mon Sep 17 00:00:00 2001 From: Bharath Date: Sun, 10 Nov 2024 14:38:41 +0530 Subject: [PATCH 12/79] dont send mempool clearing event while holding mempool lock --- core/txpool/legacypool/legacypool.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index b8c66617f..b6e769452 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -1421,9 +1421,7 @@ func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest, pool.mu.Unlock() if reset != nil { - if reset.newHead != nil { - pool.mempoolClearFeed.Send(core.NewMempoolCleared{NewHead: reset.newHead}) - } + pool.mempoolClearFeed.Send(core.NewMempoolCleared{NewHead: reset.newHead}) } // Notify subsystems for newly added transactions From 7dc78f578966154f6024c0d8b2f0506086750a61 Mon Sep 17 00:00:00 2001 From: Bharath Date: Tue, 29 Oct 2024 14:11:42 +0530 Subject: [PATCH 13/79] implement stream execute optimistic block --- grpc/execution/server.go | 60 ++++++++++++++++++++++++++++++++++++---- 1 file changed, 55 insertions(+), 5 deletions(-) diff --git a/grpc/execution/server.go b/grpc/execution/server.go index ea63ca35a..2399c9c9c 100644 --- a/grpc/execution/server.go +++ b/grpc/execution/server.go @@ -9,10 +9,12 @@ import ( "crypto/sha256" "errors" "fmt" + "io" "math/big" "sync" "time" + optimisticGrpc "buf.build/gen/go/astria/execution-apis/grpc/go/astria/bundle/v1alpha1/bundlev1alpha1grpc" astriaGrpc "buf.build/gen/go/astria/execution-apis/grpc/go/astria/execution/v1/executionv1grpc" optimsticPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/bundle/v1alpha1" astriaPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/execution/v1" @@ 
-37,6 +39,8 @@ type ExecutionServiceServerV1 struct { // NOTE - from the generated code: All implementations must embed // UnimplementedExecutionServiceServer for forward compatibility astriaGrpc.UnimplementedExecutionServiceServer + optimisticGrpc.UnimplementedOptimisticExecutionServiceServer + optimisticGrpc.UnimplementedBundleServiceServer eth *eth.Ethereum bc *core.BlockChain @@ -51,6 +55,8 @@ type ExecutionServiceServerV1 struct { bridgeAllowedAssets map[string]struct{} // a set of allowed asset IDs structs are left empty nextFeeRecipient common.Address // Fee recipient for the next block + + currentOptimisticSequencerBlock []byte } var ( @@ -148,11 +154,12 @@ func NewExecutionServiceServerV1(eth *eth.Ethereum) (*ExecutionServiceServerV1, } return &ExecutionServiceServerV1{ - eth: eth, - bc: bc, - bridgeAddresses: bridgeAddresses, - bridgeAllowedAssets: bridgeAllowedAssets, - nextFeeRecipient: nextFeeRecipient, + eth: eth, + bc: bc, + bridgeAddresses: bridgeAddresses, + bridgeAllowedAssets: bridgeAllowedAssets, + nextFeeRecipient: nextFeeRecipient, + currentOptimisticSequencerBlock: []byte{}, }, nil } @@ -234,6 +241,48 @@ func protoU128ToBigInt(u128 *primitivev1.Uint128) *big.Int { return lo.Add(lo, hi) } +func (s *ExecutionServiceServerV1) StreamExecuteOptimisticBlock(stream optimisticGrpc.OptimisticExecutionService_StreamExecuteOptimisticBlockServer) error { + mempoolClearingEventCh := make(chan core.NewMempoolCleared) + mempoolClearingEvent := s.eth.TxPool().SubscribeMempoolClearance(mempoolClearingEventCh) + defer mempoolClearingEvent.Unsubscribe() + + for { + msg, err := stream.Recv() + if errors.Is(err, io.EOF) { + return nil + } + if err != nil { + return err + } + + baseBlock := msg.GetBlock() + + // execute the optimistic block and wait for the mempool clearing event + optimisticBlock, err := s.ExecuteOptimisticBlock(context.Background(), baseBlock) + if err != nil { + return status.Error(codes.Internal, "failed to execute optimistic block") + } + optimisticBlockHash := common.BytesToHash(optimisticBlock.Hash) + + // listen to the mempool clearing event and send the response back to the auctioneer when the mempool is cleared + select { + case event := <-mempoolClearingEventCh: + if event.NewHead.Hash() != optimisticBlockHash { + return status.Error(codes.Internal, "failed to clear mempool after optimistic block execution") + } + s.currentOptimisticSequencerBlock = baseBlock.SequencerBlockHash + err = stream.Send(&optimsticPb.StreamExecuteOptimisticBlockResponse{ + Block: optimisticBlock, + BaseSequencerBlockHash: baseBlock.SequencerBlockHash, + }) + case <-time.After(10 * time.Second): + return status.Error(codes.DeadlineExceeded, "timed out waiting for mempool to clear after optimistic block execution") + case err := <-mempoolClearingEvent.Err(): + return status.Error(codes.Internal, fmt.Sprintf("error waiting for mempool clearing event: %v", err)) + } + } +} + func (s *ExecutionServiceServerV1) ExecuteOptimisticBlock(ctx context.Context, req *optimsticPb.BaseBlock) (*astriaPb.Block, error) { // we need to execute the optimistic block log.Debug("ExecuteOptimisticBlock called", "timestamp", req.Timestamp, "sequencer_block_hash", req.SequencerBlockHash) @@ -253,6 +302,7 @@ func (s *ExecutionServiceServerV1) ExecuteOptimisticBlock(ctx context.Context, r defer executionOptimisticBlockTimer.UpdateSince(executionStart) // get the soft block + // TODO - we will have to take an update commitment lock here softBlock := s.bc.CurrentSafeBlock() s.blockExecutionLock.Lock() From 
f311e0aac0c973eee32be78b086c8747c55d5872 Mon Sep 17 00:00:00 2001 From: Bharath Date: Mon, 4 Nov 2024 12:16:38 +0530 Subject: [PATCH 14/79] unit tests --- grpc/execution/mock_grpc_stream.go | 58 ++++ grpc/execution/server.go | 4 +- grpc/execution/server_test.go | 407 ++++++++++++++++++----------- 3 files changed, 312 insertions(+), 157 deletions(-) create mode 100644 grpc/execution/mock_grpc_stream.go diff --git a/grpc/execution/mock_grpc_stream.go b/grpc/execution/mock_grpc_stream.go new file mode 100644 index 000000000..5349418ba --- /dev/null +++ b/grpc/execution/mock_grpc_stream.go @@ -0,0 +1,58 @@ +package execution + +import ( + optimsticPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/bundle/v1alpha1" + "context" + "google.golang.org/grpc/metadata" + "io" + "time" +) + +type MockStream struct { + requestStream []*optimsticPb.StreamExecuteOptimisticBlockRequest + accumulatedResponses []*optimsticPb.StreamExecuteOptimisticBlockResponse + requestCounter uint64 +} + +func (ms *MockStream) Recv() (*optimsticPb.StreamExecuteOptimisticBlockRequest, error) { + // add a delay to make it look like an async stream + time.Sleep(2 * time.Second) + if ms.requestCounter > uint64(len(ms.requestStream)-1) { + // end the stream after all the packets have been sent + return nil, io.EOF + } + + req := ms.requestStream[ms.requestCounter] + ms.requestCounter += 1 + + return req, nil +} + +func (ms *MockStream) Send(res *optimsticPb.StreamExecuteOptimisticBlockResponse) error { + ms.accumulatedResponses = append(ms.accumulatedResponses, res) + return nil +} + +func (ms *MockStream) SetHeader(md metadata.MD) error { + panic("implement me") +} + +func (ms *MockStream) SendHeader(md metadata.MD) error { + panic("implement me") +} + +func (ms *MockStream) SetTrailer(md metadata.MD) { + panic("implement me") +} + +func (ms *MockStream) Context() context.Context { + return context.Background() +} + +func (ms *MockStream) SendMsg(m any) error { + panic("implement me") +} + +func (ms *MockStream) RecvMsg(m any) error { + panic("implement me") +} diff --git a/grpc/execution/server.go b/grpc/execution/server.go index 2399c9c9c..54bda5798 100644 --- a/grpc/execution/server.go +++ b/grpc/execution/server.go @@ -248,6 +248,7 @@ func (s *ExecutionServiceServerV1) StreamExecuteOptimisticBlock(stream optimisti for { msg, err := stream.Recv() + // stream has been closed if errors.Is(err, io.EOF) { return nil } @@ -258,7 +259,7 @@ func (s *ExecutionServiceServerV1) StreamExecuteOptimisticBlock(stream optimisti baseBlock := msg.GetBlock() // execute the optimistic block and wait for the mempool clearing event - optimisticBlock, err := s.ExecuteOptimisticBlock(context.Background(), baseBlock) + optimisticBlock, err := s.ExecuteOptimisticBlock(stream.Context(), baseBlock) if err != nil { return status.Error(codes.Internal, "failed to execute optimistic block") } @@ -302,7 +303,6 @@ func (s *ExecutionServiceServerV1) ExecuteOptimisticBlock(ctx context.Context, r defer executionOptimisticBlockTimer.UpdateSince(executionStart) // get the soft block - // TODO - we will have to take an update commitment lock here softBlock := s.bc.CurrentSafeBlock() s.blockExecutionLock.Lock() diff --git a/grpc/execution/server_test.go b/grpc/execution/server_test.go index 2ce2738e0..076efadd9 100644 --- a/grpc/execution/server_test.go +++ b/grpc/execution/server_test.go @@ -199,7 +199,7 @@ func bigIntToProtoU128(i *big.Int) *primitivev1.Uint128 { return &primitivev1.Uint128{Lo: lo, Hi: hi} } -func 
TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlock(t *testing.T) { +func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) { ethservice, _ := setupExecutionService(t, 10) tests := []struct { @@ -212,7 +212,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlock(t *testing.T) { expectedReturnCode codes.Code }{ { - description: "ExecuteOptimisticBlock without calling GetGenesisInfo and GetCommitmentState", + description: "ExecuteBlock without calling GetGenesisInfo and GetCommitmentState", callGenesisInfoAndGetCommitmentState: false, numberOfTxs: 5, prevBlockHash: ethservice.BlockChain().GetBlockByNumber(2).Hash().Bytes(), @@ -221,7 +221,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlock(t *testing.T) { expectedReturnCode: codes.PermissionDenied, }, { - description: "ExecuteOptimisticBlock with 5 txs and no deposit tx", + description: "ExecuteBlock with 5 txs and no deposit tx", callGenesisInfoAndGetCommitmentState: true, numberOfTxs: 5, prevBlockHash: ethservice.BlockChain().CurrentSafeBlock().Hash().Bytes(), @@ -230,7 +230,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlock(t *testing.T) { expectedReturnCode: 0, }, { - description: "ExecuteOptimisticBlock with 5 txs and a deposit tx", + description: "ExecuteBlock with 5 txs and a deposit tx", callGenesisInfoAndGetCommitmentState: true, numberOfTxs: 5, prevBlockHash: ethservice.BlockChain().CurrentSafeBlock().Hash().Bytes(), @@ -238,6 +238,15 @@ func TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlock(t *testing.T) { depositTxAmount: big.NewInt(1000000000000000000), expectedReturnCode: 0, }, + { + description: "ExecuteBlock with incorrect previous block hash", + callGenesisInfoAndGetCommitmentState: true, + numberOfTxs: 5, + prevBlockHash: ethservice.BlockChain().GetBlockByNumber(2).Hash().Bytes(), + timestamp: ethservice.BlockChain().GetBlockByNumber(2).Time() + 2, + depositTxAmount: big.NewInt(0), + expectedReturnCode: codes.FailedPrecondition, + }, } for _, tt := range tests { @@ -305,27 +314,21 @@ func TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlock(t *testing.T) { marshalledTxs = append(marshalledTxs, depositTx) } - optimisticHeadCh := make(chan core.ChainOptimisticHeadEvent, 1) - optimsticHeadSub := ethservice.BlockChain().SubscribeChainOptimisticHeadEvent(optimisticHeadCh) - defer optimsticHeadSub.Unsubscribe() - - baseBlockReq := &optimsticPb.BaseBlock{ + executeBlockReq := &astriaPb.ExecuteBlockRequest{ + PrevBlockHash: tt.prevBlockHash, Timestamp: ×tamppb.Timestamp{ Seconds: int64(tt.timestamp), }, - Transactions: marshalledTxs, - SequencerBlockHash: []byte("test_hash"), + Transactions: marshalledTxs, } - res, err := serviceV1Alpha1.ExecuteOptimisticBlock(context.Background(), baseBlockReq) + executeBlockRes, err := serviceV1Alpha1.ExecuteBlock(context.Background(), executeBlockReq) if tt.expectedReturnCode > 0 { - require.NotNil(t, err, "ExecuteOptimisticBlock should return an error") - require.Equal(t, tt.expectedReturnCode, status.Code(err), "ExecuteOptimisticBlock failed") - } else { - require.Nil(t, err, "ExecuteOptimisticBlock failed") + require.NotNil(t, err, "ExecuteBlock should return an error") + require.Equal(t, tt.expectedReturnCode, status.Code(err), "ExecuteBlock failed") } if err == nil { - require.NotNil(t, res, "ExecuteOptimisticBlock response is nil") + require.NotNil(t, executeBlockRes, "ExecuteBlock response is nil") astriaOrdered := ethservice.TxPool().AstriaOrdered() require.Equal(t, 0, astriaOrdered.Len(), "AstriaOrdered should 
be empty") @@ -335,38 +338,148 @@ func TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlock(t *testing.T) { require.Nil(t, err, "GetCommitmentState failed") require.Exactly(t, commitmentStateBeforeExecuteBlock, commitmentStateAfterExecuteBlock, "Commitment state should not be updated") + } - // check if the optimistic block is set - optimisticBlock := ethservice.BlockChain().CurrentOptimisticBlock() - require.NotNil(t, optimisticBlock, "Optimistic block is not set") + }) + } +} - // check if the optimistic block is correct - require.Equal(t, common.BytesToHash(res.Hash), optimisticBlock.Hash(), "Optimistic block hashes do not match") - require.Equal(t, common.BytesToHash(res.ParentBlockHash), optimisticBlock.ParentHash, "Optimistic block parent hashes do not match") - require.Equal(t, uint64(res.Number), optimisticBlock.Number.Uint64(), "Optimistic block numbers do not match") +func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitment(t *testing.T) { + ethservice, serviceV1Alpha1 := setupExecutionService(t, 10) - // check if optimistic block is inserted into chain - block := ethservice.BlockChain().GetBlockByHash(optimisticBlock.Hash()) - require.NotNil(t, block, "Optimistic block not found in blockchain") - require.Equal(t, uint64(res.Number), block.NumberU64(), "Block number is not correct") + // call genesis info + genesisInfo, err := serviceV1Alpha1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{}) + require.Nil(t, err, "GetGenesisInfo failed") + require.NotNil(t, genesisInfo, "GenesisInfo is nil") - // timeout for optimistic head event - select { - case blockEvent := <-optimisticHeadCh: - require.NotNil(t, blockEvent, "Optimistic head event not received") - require.Equal(t, block.Hash(), blockEvent.Block.Hash(), "Optimistic head event block hash is not correct") - require.Equal(t, block.NumberU64(), blockEvent.Block.NumberU64(), "Optimistic head event block number is not correct") - case <-time.After(2 * time.Second): - require.FailNow(t, "Optimistic head event not received") - case err := <-optimsticHeadSub.Err(): - require.Nil(t, err, "Optimistic head event subscription failed") - } - } + // call get commitment state + commitmentState, err := serviceV1Alpha1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) + require.Nil(t, err, "GetCommitmentState failed") + require.NotNil(t, commitmentState, "CommitmentState is nil") + + // get previous block hash + previousBlock := ethservice.BlockChain().CurrentSafeBlock() + require.NotNil(t, previousBlock, "Previous block not found") + + // create 5 txs + txs := []*types.Transaction{} + marshalledTxs := []*sequencerblockv1.RollupData{} + for i := 0; i < 5; i++ { + unsignedTx := types.NewTransaction(uint64(i), testToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil) + tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), testKey) + require.Nil(t, err, "Failed to sign tx") + txs = append(txs, tx) + + marshalledTx, err := tx.MarshalBinary() + require.Nil(t, err, "Failed to marshal tx") + marshalledTxs = append(marshalledTxs, &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{SequencedData: marshalledTx}, }) } + + amountToDeposit := big.NewInt(1000000000000000000) + depositAmount := bigIntToProtoU128(amountToDeposit) + bridgeAddress := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].BridgeAddress + bridgeAssetDenom := 
ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].AssetDenom + + // create new chain destination address for better testing + chainDestinationAddressPrivKey, err := crypto.GenerateKey() + require.Nil(t, err, "Failed to generate chain destination address") + + chainDestinationAddress := crypto.PubkeyToAddress(chainDestinationAddressPrivKey.PublicKey) + + stateDb, err := ethservice.BlockChain().State() + require.Nil(t, err, "Failed to get state db") + require.NotNil(t, stateDb, "State db is nil") + + chainDestinationAddressBalanceBefore := stateDb.GetBalance(chainDestinationAddress) + + depositTx := &sequencerblockv1.RollupData{Value: &sequencerblockv1.RollupData_Deposit{Deposit: &sequencerblockv1.Deposit{ + BridgeAddress: &primitivev1.Address{ + Bech32M: bridgeAddress, + }, + Asset: bridgeAssetDenom, + Amount: depositAmount, + RollupId: genesisInfo.RollupId, + DestinationChainAddress: chainDestinationAddress.String(), + SourceTransactionId: &primitivev1.TransactionId{ + Inner: "test_tx_hash", + }, + SourceActionIndex: 0, + }}} + + marshalledTxs = append(marshalledTxs, depositTx) + + executeBlockReq := &astriaPb.ExecuteBlockRequest{ + PrevBlockHash: previousBlock.Hash().Bytes(), + Timestamp: ×tamppb.Timestamp{ + Seconds: int64(previousBlock.Time + 2), + }, + Transactions: marshalledTxs, + } + + executeBlockRes, err := serviceV1Alpha1.ExecuteBlock(context.Background(), executeBlockReq) + require.Nil(t, err, "ExecuteBlock failed") + + require.NotNil(t, executeBlockRes, "ExecuteBlock response is nil") + + // check if astria ordered txs are cleared + astriaOrdered := ethservice.TxPool().AstriaOrdered() + require.Equal(t, 0, astriaOrdered.Len(), "AstriaOrdered should be empty") + + // call update commitment state to set the block we executed as soft and firm + updateCommitmentStateReq := &astriaPb.UpdateCommitmentStateRequest{ + CommitmentState: &astriaPb.CommitmentState{ + Soft: &astriaPb.Block{ + Hash: executeBlockRes.Hash, + ParentBlockHash: executeBlockRes.ParentBlockHash, + Number: executeBlockRes.Number, + Timestamp: executeBlockRes.Timestamp, + }, + Firm: &astriaPb.Block{ + Hash: executeBlockRes.Hash, + ParentBlockHash: executeBlockRes.ParentBlockHash, + Number: executeBlockRes.Number, + Timestamp: executeBlockRes.Timestamp, + }, + BaseCelestiaHeight: commitmentState.BaseCelestiaHeight + 1, + }, + } + + updateCommitmentStateRes, err := serviceV1Alpha1.UpdateCommitmentState(context.Background(), updateCommitmentStateReq) + require.Nil(t, err, "UpdateCommitmentState failed") + require.NotNil(t, updateCommitmentStateRes, "UpdateCommitmentState response should not be nil") + require.Equal(t, updateCommitmentStateRes, updateCommitmentStateReq.CommitmentState, "CommitmentState response should match request") + + // get the soft and firm block + softBlock := ethservice.BlockChain().CurrentSafeBlock() + require.NotNil(t, softBlock, "SoftBlock is nil") + firmBlock := ethservice.BlockChain().CurrentFinalBlock() + require.NotNil(t, firmBlock, "FirmBlock is nil") + + // check if the soft and firm block are set correctly + require.True(t, bytes.Equal(softBlock.Hash().Bytes(), updateCommitmentStateRes.Soft.Hash), "Soft Block Hashes do not match") + require.True(t, bytes.Equal(softBlock.ParentHash.Bytes(), updateCommitmentStateRes.Soft.ParentBlockHash), "Soft Block Parent Hash do not match") + require.Equal(t, softBlock.Number.Uint64(), uint64(updateCommitmentStateRes.Soft.Number), "Soft Block Number do not match") + + require.True(t, bytes.Equal(firmBlock.Hash().Bytes(), 
updateCommitmentStateRes.Firm.Hash), "Firm Block Hashes do not match") + require.True(t, bytes.Equal(firmBlock.ParentHash.Bytes(), updateCommitmentStateRes.Firm.ParentBlockHash), "Firm Block Parent Hash do not match") + require.Equal(t, firmBlock.Number.Uint64(), uint64(updateCommitmentStateRes.Firm.Number), "Firm Block Number do not match") + + celestiaBaseHeight := ethservice.BlockChain().CurrentBaseCelestiaHeight() + require.Equal(t, celestiaBaseHeight, updateCommitmentStateRes.BaseCelestiaHeight, "BaseCelestiaHeight should be updated in db") + + // check the difference in balances after deposit tx + stateDb, err = ethservice.BlockChain().State() + require.Nil(t, err, "Failed to get state db") + require.NotNil(t, stateDb, "State db is nil") + chainDestinationAddressBalanceAfter := stateDb.GetBalance(chainDestinationAddress) + + balanceDiff := new(uint256.Int).Sub(chainDestinationAddressBalanceAfter, chainDestinationAddressBalanceBefore) + require.True(t, balanceDiff.Cmp(uint256.NewInt(1000000000000000000)) == 0, "Chain destination address balance is not correct") } -func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) { +func TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlock(t *testing.T) { ethservice, _ := setupExecutionService(t, 10) tests := []struct { @@ -379,7 +492,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) { expectedReturnCode codes.Code }{ { - description: "ExecuteBlock without calling GetGenesisInfo and GetCommitmentState", + description: "ExecuteOptimisticBlock without calling GetGenesisInfo and GetCommitmentState", callGenesisInfoAndGetCommitmentState: false, numberOfTxs: 5, prevBlockHash: ethservice.BlockChain().GetBlockByNumber(2).Hash().Bytes(), @@ -388,7 +501,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) { expectedReturnCode: codes.PermissionDenied, }, { - description: "ExecuteBlock with 5 txs and no deposit tx", + description: "ExecuteOptimisticBlock with 5 txs and no deposit tx", callGenesisInfoAndGetCommitmentState: true, numberOfTxs: 5, prevBlockHash: ethservice.BlockChain().CurrentSafeBlock().Hash().Bytes(), @@ -397,7 +510,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) { expectedReturnCode: 0, }, { - description: "ExecuteBlock with 5 txs and a deposit tx", + description: "ExecuteOptimisticBlock with 5 txs and a deposit tx", callGenesisInfoAndGetCommitmentState: true, numberOfTxs: 5, prevBlockHash: ethservice.BlockChain().CurrentSafeBlock().Hash().Bytes(), @@ -405,15 +518,6 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) { depositTxAmount: big.NewInt(1000000000000000000), expectedReturnCode: 0, }, - { - description: "ExecuteBlock with incorrect previous block hash", - callGenesisInfoAndGetCommitmentState: true, - numberOfTxs: 5, - prevBlockHash: ethservice.BlockChain().GetBlockByNumber(2).Hash().Bytes(), - timestamp: ethservice.BlockChain().GetBlockByNumber(2).Time() + 2, - depositTxAmount: big.NewInt(0), - expectedReturnCode: codes.FailedPrecondition, - }, } for _, tt := range tests { @@ -438,7 +542,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) { // create the txs to send // create 5 txs txs := []*types.Transaction{} - marshalledTxs := []*sequencerblockv1.RollupData{} + marshalledTxs := []*sequencerblockv1alpha1.RollupData{} for i := 0; i < 5; i++ { unsignedTx := types.NewTransaction(uint64(i), testToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil) tx, err := types.SignTx(unsignedTx, 
types.LatestSigner(ethservice.BlockChain().Config()), testKey) @@ -447,8 +551,8 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) { marshalledTx, err := tx.MarshalBinary() require.Nil(t, err, "Failed to marshal tx") - marshalledTxs = append(marshalledTxs, &sequencerblockv1.RollupData{ - Value: &sequencerblockv1.RollupData_SequencedData{SequencedData: marshalledTx}, + marshalledTxs = append(marshalledTxs, &sequencerblockv1alpha1.RollupData{ + Value: &sequencerblockv1alpha1.RollupData_SequencedData{SequencedData: marshalledTx}, }) } @@ -464,7 +568,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) { chainDestinationAddress := crypto.PubkeyToAddress(chainDestinationAddressPrivKey.PublicKey) - depositTx := &sequencerblockv1.RollupData{Value: &sequencerblockv1.RollupData_Deposit{Deposit: &sequencerblockv1.Deposit{ + depositTx := &sequencerblockv1alpha1.RollupData{Value: &sequencerblockv1alpha1.RollupData_Deposit{Deposit: &sequencerblockv1alpha1.Deposit{ BridgeAddress: &primitivev1.Address{ Bech32M: bridgeAddress, }, @@ -481,21 +585,27 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) { marshalledTxs = append(marshalledTxs, depositTx) } - executeBlockReq := &astriaPb.ExecuteBlockRequest{ - PrevBlockHash: tt.prevBlockHash, + optimisticHeadCh := make(chan core.ChainOptimisticHeadEvent, 1) + optimsticHeadSub := ethservice.BlockChain().SubscribeChainOptimisticHeadEvent(optimisticHeadCh) + defer optimsticHeadSub.Unsubscribe() + + baseBlockReq := &optimsticPb.BaseBlock{ Timestamp: ×tamppb.Timestamp{ Seconds: int64(tt.timestamp), }, - Transactions: marshalledTxs, + Transactions: marshalledTxs, + SequencerBlockHash: []byte("test_hash"), } - executeBlockRes, err := serviceV1Alpha1.ExecuteBlock(context.Background(), executeBlockReq) + res, err := serviceV1Alpha1.ExecuteOptimisticBlock(context.Background(), baseBlockReq) if tt.expectedReturnCode > 0 { - require.NotNil(t, err, "ExecuteBlock should return an error") - require.Equal(t, tt.expectedReturnCode, status.Code(err), "ExecuteBlock failed") + require.NotNil(t, err, "ExecuteOptimisticBlock should return an error") + require.Equal(t, tt.expectedReturnCode, status.Code(err), "ExecuteOptimisticBlock failed") + } else { + require.Nil(t, err, "ExecuteOptimisticBlock failed") } if err == nil { - require.NotNil(t, executeBlockRes, "ExecuteBlock response is nil") + require.NotNil(t, res, "ExecuteOptimisticBlock response is nil") astriaOrdered := ethservice.TxPool().AstriaOrdered() require.Equal(t, 0, astriaOrdered.Len(), "AstriaOrdered should be empty") @@ -505,13 +615,38 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) { require.Nil(t, err, "GetCommitmentState failed") require.Exactly(t, commitmentStateBeforeExecuteBlock, commitmentStateAfterExecuteBlock, "Commitment state should not be updated") - } + // check if the optimistic block is set + optimisticBlock := ethservice.BlockChain().CurrentOptimisticBlock() + require.NotNil(t, optimisticBlock, "Optimistic block is not set") + + // check if the optimistic block is correct + require.Equal(t, common.BytesToHash(res.Hash), optimisticBlock.Hash(), "Optimistic block hashes do not match") + require.Equal(t, common.BytesToHash(res.ParentBlockHash), optimisticBlock.ParentHash, "Optimistic block parent hashes do not match") + require.Equal(t, uint64(res.Number), optimisticBlock.Number.Uint64(), "Optimistic block numbers do not match") + + // check if optimistic block is inserted into chain + block := 
ethservice.BlockChain().GetBlockByHash(optimisticBlock.Hash()) + require.NotNil(t, block, "Optimistic block not found in blockchain") + require.Equal(t, uint64(res.Number), block.NumberU64(), "Block number is not correct") + + // timeout for optimistic head event + select { + case blockEvent := <-optimisticHeadCh: + require.NotNil(t, blockEvent, "Optimistic head event not received") + require.Equal(t, block.Hash(), blockEvent.Block.Hash(), "Optimistic head event block hash is not correct") + require.Equal(t, block.NumberU64(), blockEvent.Block.NumberU64(), "Optimistic head event block number is not correct") + case <-time.After(2 * time.Second): + require.FailNow(t, "Optimistic head event not received") + case err := <-optimsticHeadSub.Err(): + require.Nil(t, err, "Optimistic head event subscription failed") + } + } }) } } -func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitment(t *testing.T) { +func TestExecutionServiceServerV1Alpha2_StreamExecuteOptimisticBlock(t *testing.T) { ethservice, serviceV1Alpha1 := setupExecutionService(t, 10) // call genesis info @@ -528,9 +663,12 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitment(t *testi previousBlock := ethservice.BlockChain().CurrentSafeBlock() require.NotNil(t, previousBlock, "Previous block not found") - // create 5 txs + requestStreams := []*optimsticPb.StreamExecuteOptimisticBlockRequest{} + sequencerBlockHash := []byte("sequencer_block_hash") + + // create 1 stream item with 5 txs txs := []*types.Transaction{} - marshalledTxs := []*sequencerblockv1.RollupData{} + marshalledTxs := []*sequencerblockv1alpha1.RollupData{} for i := 0; i < 5; i++ { unsignedTx := types.NewTransaction(uint64(i), testToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil) tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), testKey) @@ -539,111 +677,70 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitment(t *testi marshalledTx, err := tx.MarshalBinary() require.Nil(t, err, "Failed to marshal tx") - marshalledTxs = append(marshalledTxs, &sequencerblockv1.RollupData{ - Value: &sequencerblockv1.RollupData_SequencedData{SequencedData: marshalledTx}, + marshalledTxs = append(marshalledTxs, &sequencerblockv1alpha1.RollupData{ + Value: &sequencerblockv1alpha1.RollupData_SequencedData{SequencedData: marshalledTx}, }) } - amountToDeposit := big.NewInt(1000000000000000000) - depositAmount := bigIntToProtoU128(amountToDeposit) - bridgeAddress := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].BridgeAddress - bridgeAssetDenom := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].AssetDenom - - // create new chain destination address for better testing - chainDestinationAddressPrivKey, err := crypto.GenerateKey() - require.Nil(t, err, "Failed to generate chain destination address") - - chainDestinationAddress := crypto.PubkeyToAddress(chainDestinationAddressPrivKey.PublicKey) - - stateDb, err := ethservice.BlockChain().State() - require.Nil(t, err, "Failed to get state db") - require.NotNil(t, stateDb, "State db is nil") - - chainDestinationAddressBalanceBefore := stateDb.GetBalance(chainDestinationAddress) - - depositTx := &sequencerblockv1.RollupData{Value: &sequencerblockv1.RollupData_Deposit{Deposit: &sequencerblockv1.Deposit{ - BridgeAddress: &primitivev1.Address{ - Bech32M: bridgeAddress, - }, - Asset: bridgeAssetDenom, - Amount: depositAmount, - RollupId: genesisInfo.RollupId, - DestinationChainAddress: 
chainDestinationAddress.String(), - SourceTransactionId: &primitivev1.TransactionId{ - Inner: "test_tx_hash", - }, - SourceActionIndex: 0, - }}} + errs := ethservice.TxPool().Add(txs, true, false) + for _, err := range errs { + require.Nil(t, err, "Failed to add tx to mempool") + } - marshalledTxs = append(marshalledTxs, depositTx) + pendingTxs := ethservice.TxPool().Pending(txpool.PendingFilter{OnlyPlainTxs: true}) + require.Len(t, pendingTxs, 1, "Mempool should have 1 tx") + addrTxs := pendingTxs[testAddr] + require.Len(t, addrTxs, 5, "Mempool should have 5 txs for test address") - executeBlockReq := &astriaPb.ExecuteBlockRequest{ - PrevBlockHash: previousBlock.Hash().Bytes(), + req := optimsticPb.StreamExecuteOptimisticBlockRequest{Block: &optimsticPb.BaseBlock{ + SequencerBlockHash: sequencerBlockHash, + Transactions: marshalledTxs, Timestamp: ×tamppb.Timestamp{ Seconds: int64(previousBlock.Time + 2), }, - Transactions: marshalledTxs, - } + }} - executeBlockRes, err := serviceV1Alpha1.ExecuteBlock(context.Background(), executeBlockReq) - require.Nil(t, err, "ExecuteBlock failed") - - require.NotNil(t, executeBlockRes, "ExecuteBlock response is nil") + requestStreams = append(requestStreams, &req) - // check if astria ordered txs are cleared - astriaOrdered := ethservice.TxPool().AstriaOrdered() - require.Equal(t, 0, astriaOrdered.Len(), "AstriaOrdered should be empty") - - // call update commitment state to set the block we executed as soft and firm - updateCommitmentStateReq := &astriaPb.UpdateCommitmentStateRequest{ - CommitmentState: &astriaPb.CommitmentState{ - Soft: &astriaPb.Block{ - Hash: executeBlockRes.Hash, - ParentBlockHash: executeBlockRes.ParentBlockHash, - Number: executeBlockRes.Number, - Timestamp: executeBlockRes.Timestamp, - }, - Firm: &astriaPb.Block{ - Hash: executeBlockRes.Hash, - ParentBlockHash: executeBlockRes.ParentBlockHash, - Number: executeBlockRes.Number, - Timestamp: executeBlockRes.Timestamp, - }, - BaseCelestiaHeight: commitmentState.BaseCelestiaHeight + 1, - }, + mockStream := &MockBidirectionalStreaming[optimsticPb.StreamExecuteOptimisticBlockRequest, optimsticPb.StreamExecuteOptimisticBlockResponse]{ + requestStream: requestStreams, + accumulatedResponses: []*optimsticPb.StreamExecuteOptimisticBlockResponse{}, + requestCounter: 0, } - updateCommitmentStateRes, err := serviceV1Alpha1.UpdateCommitmentState(context.Background(), updateCommitmentStateReq) - require.Nil(t, err, "UpdateCommitmentState failed") - require.NotNil(t, updateCommitmentStateRes, "UpdateCommitmentState response should not be nil") - require.Equal(t, updateCommitmentStateRes, updateCommitmentStateReq.CommitmentState, "CommitmentState response should match request") + errorCh := make(chan error) + go func(errorCh chan error) { + errorCh <- serviceV1Alpha1.StreamExecuteOptimisticBlock(mockStream) + }(errorCh) - // get the soft and firm block - softBlock := ethservice.BlockChain().CurrentSafeBlock() - require.NotNil(t, softBlock, "SoftBlock is nil") - firmBlock := ethservice.BlockChain().CurrentFinalBlock() - require.NotNil(t, firmBlock, "FirmBlock is nil") + select { + // stream either errors out of gets closed + case err := <-errorCh: + require.Nil(t, err, "StreamExecuteOptimisticBlock failed") + } - // check if the soft and firm block are set correctly - require.True(t, bytes.Equal(softBlock.Hash().Bytes(), updateCommitmentStateRes.Soft.Hash), "Soft Block Hashes do not match") - require.True(t, bytes.Equal(softBlock.ParentHash.Bytes(), updateCommitmentStateRes.Soft.ParentBlockHash), 
"Soft Block Parent Hash do not match") - require.Equal(t, softBlock.Number.Uint64(), uint64(updateCommitmentStateRes.Soft.Number), "Soft Block Number do not match") + accumulatedResponses := mockStream.accumulatedResponses - require.True(t, bytes.Equal(firmBlock.Hash().Bytes(), updateCommitmentStateRes.Firm.Hash), "Firm Block Hashes do not match") - require.True(t, bytes.Equal(firmBlock.ParentHash.Bytes(), updateCommitmentStateRes.Firm.ParentBlockHash), "Firm Block Parent Hash do not match") - require.Equal(t, firmBlock.Number.Uint64(), uint64(updateCommitmentStateRes.Firm.Number), "Firm Block Number do not match") + require.Equal(t, len(accumulatedResponses), len(mockStream.requestStream), "Number of responses should match the number of requests") - celestiaBaseHeight := ethservice.BlockChain().CurrentBaseCelestiaHeight() - require.Equal(t, celestiaBaseHeight, updateCommitmentStateRes.BaseCelestiaHeight, "BaseCelestiaHeight should be updated in db") + blockCounter := 1 + for _, response := range accumulatedResponses { + require.True(t, bytes.Equal(response.GetBaseSequencerBlockHash(), sequencerBlockHash), "Sequencer block hash does not match") + block := response.GetBlock() + require.True(t, bytes.Equal(block.ParentBlockHash, previousBlock.Hash().Bytes()), "Parent block hash does not match") + requiredBlockNumber := big.NewInt(0).Add(previousBlock.Number, big.NewInt(int64(blockCounter))) + require.Equal(t, requiredBlockNumber.Uint64(), uint64(block.Number), "Block number is not correct") + blockCounter += 1 + } - // check the difference in balances after deposit tx - stateDb, err = ethservice.BlockChain().State() - require.Nil(t, err, "Failed to get state db") - require.NotNil(t, stateDb, "State db is nil") - chainDestinationAddressBalanceAfter := stateDb.GetBalance(chainDestinationAddress) + // ensure mempool is cleared + astriaOrdered := ethservice.TxPool().AstriaOrdered() + require.Equal(t, 0, astriaOrdered.Len(), "AstriaOrdered should be empty") - balanceDiff := new(uint256.Int).Sub(chainDestinationAddressBalanceAfter, chainDestinationAddressBalanceBefore) - require.True(t, balanceDiff.Cmp(uint256.NewInt(1000000000000000000)) == 0, "Chain destination address balance is not correct") + pending := ethservice.TxPool().Pending(txpool.PendingFilter{ + OnlyPlainTxs: true, + }) + require.Len(t, pending, 0, "Mempool should be empty") } // Check that invalid transactions are not added into a block and are removed from the mempool From b0ec901dcaf6b651f011db7a1917940a69f64b41 Mon Sep 17 00:00:00 2001 From: Bharath Date: Tue, 1 Oct 2024 10:37:34 +0530 Subject: [PATCH 15/79] use generics to implement mock bi directional stream --- grpc/execution/mock_grpc_stream.go | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/grpc/execution/mock_grpc_stream.go b/grpc/execution/mock_grpc_stream.go index 5349418ba..98f411fb2 100644 --- a/grpc/execution/mock_grpc_stream.go +++ b/grpc/execution/mock_grpc_stream.go @@ -1,20 +1,19 @@ package execution import ( - optimsticPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/bundle/v1alpha1" "context" "google.golang.org/grpc/metadata" "io" "time" ) -type MockStream struct { - requestStream []*optimsticPb.StreamExecuteOptimisticBlockRequest - accumulatedResponses []*optimsticPb.StreamExecuteOptimisticBlockResponse +type MockBidirectionalStreaming[K any, V any] struct { + requestStream []*K + accumulatedResponses []*V requestCounter uint64 } -func (ms *MockStream) Recv() 
(*optimsticPb.StreamExecuteOptimisticBlockRequest, error) { +func (ms *MockBidirectionalStreaming[K, V]) Recv() (*K, error) { // add a delay to make it look like an async stream time.Sleep(2 * time.Second) if ms.requestCounter > uint64(len(ms.requestStream)-1) { @@ -28,31 +27,31 @@ func (ms *MockStream) Recv() (*optimsticPb.StreamExecuteOptimisticBlockRequest, return req, nil } -func (ms *MockStream) Send(res *optimsticPb.StreamExecuteOptimisticBlockResponse) error { +func (ms *MockBidirectionalStreaming[K, V]) Send(res *V) error { ms.accumulatedResponses = append(ms.accumulatedResponses, res) return nil } -func (ms *MockStream) SetHeader(md metadata.MD) error { +func (ms *MockBidirectionalStreaming[K, V]) SetHeader(md metadata.MD) error { panic("implement me") } -func (ms *MockStream) SendHeader(md metadata.MD) error { +func (ms *MockBidirectionalStreaming[K, V]) SendHeader(md metadata.MD) error { panic("implement me") } -func (ms *MockStream) SetTrailer(md metadata.MD) { +func (ms *MockBidirectionalStreaming[K, V]) SetTrailer(md metadata.MD) { panic("implement me") } -func (ms *MockStream) Context() context.Context { +func (ms *MockBidirectionalStreaming[K, V]) Context() context.Context { return context.Background() } -func (ms *MockStream) SendMsg(m any) error { +func (ms *MockBidirectionalStreaming[K, V]) SendMsg(m any) error { panic("implement me") } -func (ms *MockStream) RecvMsg(m any) error { +func (ms *MockBidirectionalStreaming[K, V]) RecvMsg(m any) error { panic("implement me") } From 56dc5cd3b39d979779a953caab4fb272cd78bf44 Mon Sep 17 00:00:00 2001 From: Bharath Date: Wed, 16 Oct 2024 23:28:08 +0530 Subject: [PATCH 16/79] wip --- grpc/execution/server_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/grpc/execution/server_test.go b/grpc/execution/server_test.go index 076efadd9..59cddea30 100644 --- a/grpc/execution/server_test.go +++ b/grpc/execution/server_test.go @@ -10,6 +10,7 @@ import ( "crypto/sha256" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" From 98edfc91512c79b1419ad0a91618237dcb10ccc3 Mon Sep 17 00:00:00 2001 From: Bharath Date: Tue, 29 Oct 2024 14:13:28 +0530 Subject: [PATCH 17/79] use an atomic pointer for sequencer block hash --- grpc/execution/server.go | 24 ++++++++++++++---------- grpc/execution/server_test.go | 17 ++++++++--------- 2 files changed, 22 insertions(+), 19 deletions(-) diff --git a/grpc/execution/server.go b/grpc/execution/server.go index 54bda5798..95c3d7b8b 100644 --- a/grpc/execution/server.go +++ b/grpc/execution/server.go @@ -12,6 +12,7 @@ import ( "io" "math/big" "sync" + "sync/atomic" "time" optimisticGrpc "buf.build/gen/go/astria/execution-apis/grpc/go/astria/bundle/v1alpha1/bundlev1alpha1grpc" @@ -56,7 +57,7 @@ type ExecutionServiceServerV1 struct { nextFeeRecipient common.Address // Fee recipient for the next block - currentOptimisticSequencerBlock []byte + currentOptimisticSequencerBlock atomic.Pointer[[]byte] } var ( @@ -153,14 +154,17 @@ func NewExecutionServiceServerV1(eth *eth.Ethereum) (*ExecutionServiceServerV1, } } - return &ExecutionServiceServerV1{ - eth: eth, - bc: bc, - bridgeAddresses: bridgeAddresses, - bridgeAllowedAssets: bridgeAllowedAssets, - nextFeeRecipient: nextFeeRecipient, - currentOptimisticSequencerBlock: []byte{}, - }, nil + execServiceServerV1Alpha2 := ExecutionServiceServerV1{ + eth: eth, + bc: 
bc, + bridgeAddresses: bridgeAddresses, + bridgeAllowedAssets: bridgeAllowedAssets, + nextFeeRecipient: nextFeeRecipient, + } + + execServiceServerV1Alpha2.currentOptimisticSequencerBlock.Store(&[]byte{}) + + return &execServiceServerV1Alpha2, nil } func (s *ExecutionServiceServerV1) GetGenesisInfo(ctx context.Context, req *astriaPb.GetGenesisInfoRequest) (*astriaPb.GenesisInfo, error) { @@ -271,7 +275,7 @@ func (s *ExecutionServiceServerV1) StreamExecuteOptimisticBlock(stream optimisti if event.NewHead.Hash() != optimisticBlockHash { return status.Error(codes.Internal, "failed to clear mempool after optimistic block execution") } - s.currentOptimisticSequencerBlock = baseBlock.SequencerBlockHash + s.currentOptimisticSequencerBlock.Store(&baseBlock.SequencerBlockHash) err = stream.Send(&optimsticPb.StreamExecuteOptimisticBlockResponse{ Block: optimisticBlock, BaseSequencerBlockHash: baseBlock.SequencerBlockHash, diff --git a/grpc/execution/server_test.go b/grpc/execution/server_test.go index 59cddea30..e6e4d4a03 100644 --- a/grpc/execution/server_test.go +++ b/grpc/execution/server_test.go @@ -10,7 +10,6 @@ import ( "crypto/sha256" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" @@ -688,10 +687,9 @@ func TestExecutionServiceServerV1Alpha2_StreamExecuteOptimisticBlock(t *testing. require.Nil(t, err, "Failed to add tx to mempool") } - pendingTxs := ethservice.TxPool().Pending(txpool.PendingFilter{OnlyPlainTxs: true}) - require.Len(t, pendingTxs, 1, "Mempool should have 1 tx") - addrTxs := pendingTxs[testAddr] - require.Len(t, addrTxs, 5, "Mempool should have 5 txs for test address") + pending, queued := ethservice.TxPool().Stats() + require.Equal(t, pending, 5, "Mempool should have 5 pending txs") + require.Equal(t, queued, 0, "Mempool should have 0 queued txs") req := optimsticPb.StreamExecuteOptimisticBlockRequest{Block: &optimsticPb.BaseBlock{ SequencerBlockHash: sequencerBlockHash, @@ -724,6 +722,8 @@ func TestExecutionServiceServerV1Alpha2_StreamExecuteOptimisticBlock(t *testing. require.Equal(t, len(accumulatedResponses), len(mockStream.requestStream), "Number of responses should match the number of requests") + require.True(t, bytes.Equal(*serviceV1Alpha1.currentOptimisticSequencerBlock.Load(), sequencerBlockHash), "Optimistic sequencer block hash should be set correctly") + blockCounter := 1 for _, response := range accumulatedResponses { require.True(t, bytes.Equal(response.GetBaseSequencerBlockHash(), sequencerBlockHash), "Sequencer block hash does not match") @@ -738,10 +738,9 @@ func TestExecutionServiceServerV1Alpha2_StreamExecuteOptimisticBlock(t *testing. 
astriaOrdered := ethservice.TxPool().AstriaOrdered() require.Equal(t, 0, astriaOrdered.Len(), "AstriaOrdered should be empty") - pending := ethservice.TxPool().Pending(txpool.PendingFilter{ - OnlyPlainTxs: true, - }) - require.Len(t, pending, 0, "Mempool should be empty") + pending, queued = ethservice.TxPool().Stats() + require.Equal(t, pending, 0, "Mempool should have 0 pending txs") + require.Equal(t, queued, 0, "Mempool should have 0 queued txs") } // Check that invalid transactions are not added into a block and are removed from the mempool From c9299570b0cca43c725d9bb010a19d70f1c77a4f Mon Sep 17 00:00:00 2001 From: Bharath Date: Thu, 24 Oct 2024 14:04:43 +0530 Subject: [PATCH 18/79] reduce mempool clearing timeout --- grpc/execution/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/grpc/execution/server.go b/grpc/execution/server.go index 95c3d7b8b..7f364c0b5 100644 --- a/grpc/execution/server.go +++ b/grpc/execution/server.go @@ -280,7 +280,7 @@ func (s *ExecutionServiceServerV1) StreamExecuteOptimisticBlock(stream optimisti Block: optimisticBlock, BaseSequencerBlockHash: baseBlock.SequencerBlockHash, }) - case <-time.After(10 * time.Second): + case <-time.After(500 * time.Millisecond): return status.Error(codes.DeadlineExceeded, "timed out waiting for mempool to clear after optimistic block execution") case err := <-mempoolClearingEvent.Err(): return status.Error(codes.Internal, fmt.Sprintf("error waiting for mempool clearing event: %v", err)) From a1152d75b4fa6d90953d4f805e0fe504ecd1ab28 Mon Sep 17 00:00:00 2001 From: Bharath Date: Tue, 29 Oct 2024 14:28:47 +0530 Subject: [PATCH 19/79] fix imports --- grpc/execution/server.go | 6 +++--- grpc/execution/server_test.go | 22 +++++++++++----------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/grpc/execution/server.go b/grpc/execution/server.go index 7f364c0b5..2eef6f0ba 100644 --- a/grpc/execution/server.go +++ b/grpc/execution/server.go @@ -245,7 +245,7 @@ func protoU128ToBigInt(u128 *primitivev1.Uint128) *big.Int { return lo.Add(lo, hi) } -func (s *ExecutionServiceServerV1) StreamExecuteOptimisticBlock(stream optimisticGrpc.OptimisticExecutionService_StreamExecuteOptimisticBlockServer) error { +func (s *ExecutionServiceServerV1) StreamExecuteOptimisticBlock(stream optimisticGrpc.OptimisticExecutionService_ExecuteOptimisticBlockStreamServer) error { mempoolClearingEventCh := make(chan core.NewMempoolCleared) mempoolClearingEvent := s.eth.TxPool().SubscribeMempoolClearance(mempoolClearingEventCh) defer mempoolClearingEvent.Unsubscribe() @@ -260,7 +260,7 @@ func (s *ExecutionServiceServerV1) StreamExecuteOptimisticBlock(stream optimisti return err } - baseBlock := msg.GetBlock() + baseBlock := msg.GetBaseBlock() // execute the optimistic block and wait for the mempool clearing event optimisticBlock, err := s.ExecuteOptimisticBlock(stream.Context(), baseBlock) @@ -276,7 +276,7 @@ func (s *ExecutionServiceServerV1) StreamExecuteOptimisticBlock(stream optimisti return status.Error(codes.Internal, "failed to clear mempool after optimistic block execution") } s.currentOptimisticSequencerBlock.Store(&baseBlock.SequencerBlockHash) - err = stream.Send(&optimsticPb.StreamExecuteOptimisticBlockResponse{ + err = stream.Send(&optimsticPb.ExecuteOptimisticBlockStreamResponse{ Block: optimisticBlock, BaseSequencerBlockHash: baseBlock.SequencerBlockHash, }) diff --git a/grpc/execution/server_test.go b/grpc/execution/server_test.go index e6e4d4a03..40517799a 100644 --- a/grpc/execution/server_test.go +++ 
b/grpc/execution/server_test.go @@ -542,7 +542,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlock(t *testing.T) { // create the txs to send // create 5 txs txs := []*types.Transaction{} - marshalledTxs := []*sequencerblockv1alpha1.RollupData{} + marshalledTxs := []*sequencerblockv1.RollupData{} for i := 0; i < 5; i++ { unsignedTx := types.NewTransaction(uint64(i), testToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil) tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), testKey) @@ -551,8 +551,8 @@ func TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlock(t *testing.T) { marshalledTx, err := tx.MarshalBinary() require.Nil(t, err, "Failed to marshal tx") - marshalledTxs = append(marshalledTxs, &sequencerblockv1alpha1.RollupData{ - Value: &sequencerblockv1alpha1.RollupData_SequencedData{SequencedData: marshalledTx}, + marshalledTxs = append(marshalledTxs, &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{SequencedData: marshalledTx}, }) } @@ -568,7 +568,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlock(t *testing.T) { chainDestinationAddress := crypto.PubkeyToAddress(chainDestinationAddressPrivKey.PublicKey) - depositTx := &sequencerblockv1alpha1.RollupData{Value: &sequencerblockv1alpha1.RollupData_Deposit{Deposit: &sequencerblockv1alpha1.Deposit{ + depositTx := &sequencerblockv1.RollupData{Value: &sequencerblockv1.RollupData_Deposit{Deposit: &sequencerblockv1.Deposit{ BridgeAddress: &primitivev1.Address{ Bech32M: bridgeAddress, }, @@ -663,12 +663,12 @@ func TestExecutionServiceServerV1Alpha2_StreamExecuteOptimisticBlock(t *testing. previousBlock := ethservice.BlockChain().CurrentSafeBlock() require.NotNil(t, previousBlock, "Previous block not found") - requestStreams := []*optimsticPb.StreamExecuteOptimisticBlockRequest{} + requestStreams := []*optimsticPb.ExecuteOptimisticBlockStreamRequest{} sequencerBlockHash := []byte("sequencer_block_hash") // create 1 stream item with 5 txs txs := []*types.Transaction{} - marshalledTxs := []*sequencerblockv1alpha1.RollupData{} + marshalledTxs := []*sequencerblockv1.RollupData{} for i := 0; i < 5; i++ { unsignedTx := types.NewTransaction(uint64(i), testToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil) tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), testKey) @@ -677,8 +677,8 @@ func TestExecutionServiceServerV1Alpha2_StreamExecuteOptimisticBlock(t *testing. marshalledTx, err := tx.MarshalBinary() require.Nil(t, err, "Failed to marshal tx") - marshalledTxs = append(marshalledTxs, &sequencerblockv1alpha1.RollupData{ - Value: &sequencerblockv1alpha1.RollupData_SequencedData{SequencedData: marshalledTx}, + marshalledTxs = append(marshalledTxs, &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{SequencedData: marshalledTx}, }) } @@ -691,7 +691,7 @@ func TestExecutionServiceServerV1Alpha2_StreamExecuteOptimisticBlock(t *testing. 
require.Equal(t, pending, 5, "Mempool should have 5 pending txs") require.Equal(t, queued, 0, "Mempool should have 0 queued txs") - req := optimsticPb.StreamExecuteOptimisticBlockRequest{Block: &optimsticPb.BaseBlock{ + req := optimsticPb.ExecuteOptimisticBlockStreamRequest{BaseBlock: &optimsticPb.BaseBlock{ SequencerBlockHash: sequencerBlockHash, Transactions: marshalledTxs, Timestamp: ×tamppb.Timestamp{ @@ -701,9 +701,9 @@ func TestExecutionServiceServerV1Alpha2_StreamExecuteOptimisticBlock(t *testing. requestStreams = append(requestStreams, &req) - mockStream := &MockBidirectionalStreaming[optimsticPb.StreamExecuteOptimisticBlockRequest, optimsticPb.StreamExecuteOptimisticBlockResponse]{ + mockStream := &MockBidirectionalStreaming[optimsticPb.ExecuteOptimisticBlockStreamRequest, optimsticPb.ExecuteOptimisticBlockStreamResponse]{ requestStream: requestStreams, - accumulatedResponses: []*optimsticPb.StreamExecuteOptimisticBlockResponse{}, + accumulatedResponses: []*optimsticPb.ExecuteOptimisticBlockStreamResponse{}, requestCounter: 0, } From 17a333cae50d791df6dd5a343e345020c142bba7 Mon Sep 17 00:00:00 2001 From: Bharath Date: Tue, 5 Nov 2024 11:27:44 +0530 Subject: [PATCH 20/79] update grpc method names --- grpc/execution/server.go | 2 +- grpc/execution/server_test.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/grpc/execution/server.go b/grpc/execution/server.go index 2eef6f0ba..768a29fb8 100644 --- a/grpc/execution/server.go +++ b/grpc/execution/server.go @@ -245,7 +245,7 @@ func protoU128ToBigInt(u128 *primitivev1.Uint128) *big.Int { return lo.Add(lo, hi) } -func (s *ExecutionServiceServerV1) StreamExecuteOptimisticBlock(stream optimisticGrpc.OptimisticExecutionService_ExecuteOptimisticBlockStreamServer) error { +func (s *ExecutionServiceServerV1) ExecuteOptimisticBlockStream(stream optimisticGrpc.OptimisticExecutionService_ExecuteOptimisticBlockStreamServer) error { mempoolClearingEventCh := make(chan core.NewMempoolCleared) mempoolClearingEvent := s.eth.TxPool().SubscribeMempoolClearance(mempoolClearingEventCh) defer mempoolClearingEvent.Unsubscribe() diff --git a/grpc/execution/server_test.go b/grpc/execution/server_test.go index 40517799a..1a54bb55b 100644 --- a/grpc/execution/server_test.go +++ b/grpc/execution/server_test.go @@ -646,7 +646,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlock(t *testing.T) { } } -func TestExecutionServiceServerV1Alpha2_StreamExecuteOptimisticBlock(t *testing.T) { +func TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlockStream(t *testing.T) { ethservice, serviceV1Alpha1 := setupExecutionService(t, 10) // call genesis info @@ -709,7 +709,7 @@ func TestExecutionServiceServerV1Alpha2_StreamExecuteOptimisticBlock(t *testing. 
errorCh := make(chan error) go func(errorCh chan error) { - errorCh <- serviceV1Alpha1.StreamExecuteOptimisticBlock(mockStream) + errorCh <- serviceV1Alpha1.ExecuteOptimisticBlockStream(mockStream) }(errorCh) select { From f1356b24c6b34c810189440a2c5114e5dedb7105 Mon Sep 17 00:00:00 2001 From: Bharath Date: Tue, 12 Nov 2024 17:02:45 +0530 Subject: [PATCH 21/79] only allow 1 client to be connected to the execute optimistic block stream --- grpc/execution/server.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/grpc/execution/server.go b/grpc/execution/server.go index 768a29fb8..b80ba8e51 100644 --- a/grpc/execution/server.go +++ b/grpc/execution/server.go @@ -58,6 +58,9 @@ type ExecutionServiceServerV1 struct { nextFeeRecipient common.Address // Fee recipient for the next block currentOptimisticSequencerBlock atomic.Pointer[[]byte] + + executeBlockStreamConnected atomic.Bool + bundleStreamConnected atomic.Bool } var ( @@ -246,6 +249,12 @@ func protoU128ToBigInt(u128 *primitivev1.Uint128) *big.Int { } func (s *ExecutionServiceServerV1) ExecuteOptimisticBlockStream(stream optimisticGrpc.OptimisticExecutionService_ExecuteOptimisticBlockStreamServer) error { + if !s.executeBlockStreamConnected.CompareAndSwap(false, true) { + return status.Error(codes.PermissionDenied, "Execute optimistic block stream already connected") + } + + defer s.executeBlockStreamConnected.Store(false) + mempoolClearingEventCh := make(chan core.NewMempoolCleared) mempoolClearingEvent := s.eth.TxPool().SubscribeMempoolClearance(mempoolClearingEventCh) defer mempoolClearingEvent.Unsubscribe() From e37bba309e9a9a782f371e2efd6a662e8c6b6aee Mon Sep 17 00:00:00 2001 From: Bharath Date: Tue, 12 Nov 2024 17:04:07 +0530 Subject: [PATCH 22/79] rename executeBlockStreamConnected to executeOptimisticBlockStreamConnected --- grpc/execution/server.go | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/grpc/execution/server.go b/grpc/execution/server.go index b80ba8e51..9ad5c8802 100644 --- a/grpc/execution/server.go +++ b/grpc/execution/server.go @@ -59,8 +59,7 @@ type ExecutionServiceServerV1 struct { currentOptimisticSequencerBlock atomic.Pointer[[]byte] - executeBlockStreamConnected atomic.Bool - bundleStreamConnected atomic.Bool + executeOptimisticBlockStreamConnected atomic.Bool } var ( @@ -249,11 +248,11 @@ func protoU128ToBigInt(u128 *primitivev1.Uint128) *big.Int { } func (s *ExecutionServiceServerV1) ExecuteOptimisticBlockStream(stream optimisticGrpc.OptimisticExecutionService_ExecuteOptimisticBlockStreamServer) error { - if !s.executeBlockStreamConnected.CompareAndSwap(false, true) { + if !s.executeOptimisticBlockStreamConnected.CompareAndSwap(false, true) { return status.Error(codes.PermissionDenied, "Execute optimistic block stream already connected") } - defer s.executeBlockStreamConnected.Store(false) + defer s.executeOptimisticBlockStreamConnected.Store(false) mempoolClearingEventCh := make(chan core.NewMempoolCleared) mempoolClearingEvent := s.eth.TxPool().SubscribeMempoolClearance(mempoolClearingEventCh) From db067e14ccec1811426b9b508bbffb5541b50283 Mon Sep 17 00:00:00 2001 From: Bharath Date: Mon, 2 Dec 2024 17:55:30 +0530 Subject: [PATCH 23/79] remove restrictions to allow just one client to connect to the optimistic grpc stream servers --- grpc/execution/server.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/grpc/execution/server.go b/grpc/execution/server.go index 9ad5c8802..768a29fb8 100644 --- a/grpc/execution/server.go +++ b/grpc/execution/server.go @@ -58,8 +58,6 @@ 
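The connection guard added in PATCH 21 (and renamed in PATCH 22) hinges on atomic.Bool.CompareAndSwap: the first caller flips the flag and keeps the stream, later callers are rejected outright instead of being queued. Below is a minimal, self-contained sketch of the same pattern; the type and function names are illustrative, not the actual server types.

package main

import (
	"errors"
	"fmt"
	"sync"
	"sync/atomic"
	"time"
)

// streamGuard admits at most one concurrent stream; a second caller is
// rejected immediately rather than queued, mirroring the CompareAndSwap
// check added in the patch.
type streamGuard struct {
	connected atomic.Bool
}

func (g *streamGuard) serve(id int) error {
	if !g.connected.CompareAndSwap(false, true) {
		return errors.New("stream already connected")
	}
	defer g.connected.Store(false)
	// Hold the "stream" briefly so concurrent callers actually overlap.
	time.Sleep(50 * time.Millisecond)
	fmt.Printf("client %d held the stream\n", id)
	return nil
}

func main() {
	var g streamGuard
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			if err := g.serve(i); err != nil {
				fmt.Printf("client %d rejected: %v\n", i, err)
			}
		}(i)
	}
	wg.Wait()
}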
type ExecutionServiceServerV1 struct { nextFeeRecipient common.Address // Fee recipient for the next block currentOptimisticSequencerBlock atomic.Pointer[[]byte] - - executeOptimisticBlockStreamConnected atomic.Bool } var ( @@ -248,12 +246,6 @@ func protoU128ToBigInt(u128 *primitivev1.Uint128) *big.Int { } func (s *ExecutionServiceServerV1) ExecuteOptimisticBlockStream(stream optimisticGrpc.OptimisticExecutionService_ExecuteOptimisticBlockStreamServer) error { - if !s.executeOptimisticBlockStreamConnected.CompareAndSwap(false, true) { - return status.Error(codes.PermissionDenied, "Execute optimistic block stream already connected") - } - - defer s.executeOptimisticBlockStreamConnected.Store(false) - mempoolClearingEventCh := make(chan core.NewMempoolCleared) mempoolClearingEvent := s.eth.TxPool().SubscribeMempoolClearance(mempoolClearingEventCh) defer mempoolClearingEvent.Unsubscribe() From c529b84dd18f13c9b99bab01caf52ffac2a6f12d Mon Sep 17 00:00:00 2001 From: Bharath Date: Tue, 5 Nov 2024 11:29:16 +0530 Subject: [PATCH 24/79] implement bundle streaming --- grpc/execution/server.go | 48 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 47 insertions(+), 1 deletion(-) diff --git a/grpc/execution/server.go b/grpc/execution/server.go index 768a29fb8..346de5f30 100644 --- a/grpc/execution/server.go +++ b/grpc/execution/server.go @@ -9,6 +9,7 @@ import ( "crypto/sha256" "errors" "fmt" + cmath "github.com/ethereum/go-ethereum/common/math" "io" "math/big" "sync" @@ -245,6 +246,51 @@ func protoU128ToBigInt(u128 *primitivev1.Uint128) *big.Int { return lo.Add(lo, hi) } +func (s *ExecutionServiceServerV1) StreamBundles(stream optimisticGrpc.BundleService_GetBundleStreamServer) error { + pendingTxEventCh := make(chan core.NewTxsEvent) + pendingTxEvent := s.eth.TxPool().SubscribeTransactions(pendingTxEventCh, false) + defer pendingTxEvent.Unsubscribe() + + for { + select { + case pendingTxs := <-pendingTxEventCh: + // get the optimistic block + // this is an in-memory read, so there shouldn't be a lot of concerns on speed + optimisticBlock := s.eth.BlockChain().CurrentOptimisticBlock() + + totalCost := big.NewInt(0) + marshalledTxs := make([][]byte, len(pendingTxs.Txs)) + bundle := optimsticPb.Bundle{} + for _, pendingTx := range pendingTxs.Txs { + effectiveTip := cmath.BigMin(pendingTx.GasTipCap(), new(big.Int).Sub(pendingTx.GasFeeCap(), optimisticBlock.BaseFee)) + totalCost.Add(totalCost, effectiveTip) + baseFee := new(big.Int).SetUint64(pendingTx.Gas()) + baseFee.Mul(baseFee, optimisticBlock.BaseFee) + totalCost.Add(totalCost, baseFee) + + marshalledTx, err := pendingTx.MarshalBinary() + if err != nil { + return status.Errorf(codes.Internal, "error marshalling tx: %v", err) + } + marshalledTxs = append(marshalledTxs, marshalledTx) + } + + bundle.Fee = totalCost.Uint64() + bundle.Transactions = marshalledTxs + bundle.BaseSequencerBlockHash = *s.currentOptimisticSequencerBlock.Load() + bundle.PrevRollupBlockHash = optimisticBlock.Hash().Bytes() + + err := stream.Send(&optimsticPb.GetBundleStreamResponse{Bundle: &bundle}) + if err != nil { + return status.Errorf(codes.Internal, "error sending bundle over stream: %v", err) + } + + case err := <-pendingTxEvent.Err(): + return status.Errorf(codes.Internal, "error waiting for pending transactions: %v", err) + } + } +} + func (s *ExecutionServiceServerV1) ExecuteOptimisticBlockStream(stream optimisticGrpc.OptimisticExecutionService_ExecuteOptimisticBlockStreamServer) error { mempoolClearingEventCh := make(chan core.NewMempoolCleared) 
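The fee attached to each streamed bundle is derived from the EIP-1559 effective tip, min(gasTipCap, gasFeeCap - baseFee), taken against the base fee of the current optimistic block (a later patch in this series drops the gas-cost term and keeps only the tip). A small arithmetic sketch using plain math/big; the server itself uses common/math.BigMin on the real transaction fields.

package main

import (
	"fmt"
	"math/big"
)

// effectiveTip returns min(tipCap, feeCap-baseFee): the priority fee per gas
// that actually accrues to the block producer under EIP-1559.
func effectiveTip(tipCap, feeCap, baseFee *big.Int) *big.Int {
	tip := new(big.Int).Sub(feeCap, baseFee)
	if tipCap.Cmp(tip) < 0 {
		return new(big.Int).Set(tipCap)
	}
	return tip
}

func main() {
	tipCap := big.NewInt(2_000_000_000)   // 2 gwei priority fee cap
	feeCap := big.NewInt(30_000_000_000)  // 30 gwei fee cap
	baseFee := big.NewInt(29_000_000_000) // 29 gwei base fee
	// feeCap-baseFee = 1 gwei, which is below the 2 gwei tip cap,
	// so the effective tip is 1 gwei.
	fmt.Println(effectiveTip(tipCap, feeCap, baseFee)) // 1000000000
}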
mempoolClearingEvent := s.eth.TxPool().SubscribeMempoolClearance(mempoolClearingEventCh) @@ -283,7 +329,7 @@ func (s *ExecutionServiceServerV1) ExecuteOptimisticBlockStream(stream optimisti case <-time.After(500 * time.Millisecond): return status.Error(codes.DeadlineExceeded, "timed out waiting for mempool to clear after optimistic block execution") case err := <-mempoolClearingEvent.Err(): - return status.Error(codes.Internal, fmt.Sprintf("error waiting for mempool clearing event: %v", err)) + return status.Errorf(codes.Internal, "error waiting for mempool clearing event: %v", err) } } } From bea3622f221862ff9ff2ecb735d8a8dbef50157e Mon Sep 17 00:00:00 2001 From: Bharath Date: Tue, 5 Nov 2024 11:30:17 +0530 Subject: [PATCH 25/79] update unit tests --- grpc/execution/mock_grpc_stream.go | 34 +++++++ grpc/execution/server.go | 23 ++--- grpc/execution/server_test.go | 143 ++++++++++++++++++++++++++++- 3 files changed, 188 insertions(+), 12 deletions(-) diff --git a/grpc/execution/mock_grpc_stream.go b/grpc/execution/mock_grpc_stream.go index 98f411fb2..6ab6ce78a 100644 --- a/grpc/execution/mock_grpc_stream.go +++ b/grpc/execution/mock_grpc_stream.go @@ -55,3 +55,37 @@ func (ms *MockBidirectionalStreaming[K, V]) SendMsg(m any) error { func (ms *MockBidirectionalStreaming[K, V]) RecvMsg(m any) error { panic("implement me") } + +type MockServerSideStreaming[K any] struct { + sentResponses []*K +} + +func (ms *MockServerSideStreaming[K]) SendMsg(m any) error { + //TODO implement me + panic("implement me") +} + +func (ms *MockServerSideStreaming[K]) Send(res *K) error { + ms.sentResponses = append(ms.sentResponses, res) + return nil +} + +func (ms *MockServerSideStreaming[K]) SetHeader(md metadata.MD) error { + panic("implement me") +} + +func (ms *MockServerSideStreaming[K]) SendHeader(md metadata.MD) error { + panic("implement me") +} + +func (ms *MockServerSideStreaming[K]) SetTrailer(md metadata.MD) { + panic("implement me") +} + +func (ms *MockServerSideStreaming[K]) Context() context.Context { + return context.Background() +} + +func (ms *MockServerSideStreaming[K]) RecvMsg(m any) error { + panic("implement me") +} diff --git a/grpc/execution/server.go b/grpc/execution/server.go index 346de5f30..a8e2d699b 100644 --- a/grpc/execution/server.go +++ b/grpc/execution/server.go @@ -258,31 +258,32 @@ func (s *ExecutionServiceServerV1) StreamBundles(stream optimisticGrpc.BundleSer // this is an in-memory read, so there shouldn't be a lot of concerns on speed optimisticBlock := s.eth.BlockChain().CurrentOptimisticBlock() - totalCost := big.NewInt(0) - marshalledTxs := make([][]byte, len(pendingTxs.Txs)) - bundle := optimsticPb.Bundle{} for _, pendingTx := range pendingTxs.Txs { + bundle := optimsticPb.Bundle{} + + totalCost := big.NewInt(0) effectiveTip := cmath.BigMin(pendingTx.GasTipCap(), new(big.Int).Sub(pendingTx.GasFeeCap(), optimisticBlock.BaseFee)) totalCost.Add(totalCost, effectiveTip) baseFee := new(big.Int).SetUint64(pendingTx.Gas()) baseFee.Mul(baseFee, optimisticBlock.BaseFee) totalCost.Add(totalCost, baseFee) + marshalledTxs := [][]byte{} marshalledTx, err := pendingTx.MarshalBinary() if err != nil { return status.Errorf(codes.Internal, "error marshalling tx: %v", err) } marshalledTxs = append(marshalledTxs, marshalledTx) - } - bundle.Fee = totalCost.Uint64() - bundle.Transactions = marshalledTxs - bundle.BaseSequencerBlockHash = *s.currentOptimisticSequencerBlock.Load() - bundle.PrevRollupBlockHash = optimisticBlock.Hash().Bytes() + bundle.Fee = totalCost.Uint64() + 
bundle.Transactions = marshalledTxs + bundle.BaseSequencerBlockHash = *s.currentOptimisticSequencerBlock.Load() + bundle.PrevRollupBlockHash = optimisticBlock.Hash().Bytes() - err := stream.Send(&optimsticPb.GetBundleStreamResponse{Bundle: &bundle}) - if err != nil { - return status.Errorf(codes.Internal, "error sending bundle over stream: %v", err) + err = stream.Send(&optimsticPb.GetBundleStreamResponse{Bundle: &bundle}) + if err != nil { + return status.Errorf(codes.Internal, "error sending bundle over stream: %v", err) + } } case err := <-pendingTxEvent.Err(): diff --git a/grpc/execution/server_test.go index 1a54bb55b..689143f85 100644 --- a/grpc/execution/server_test.go +++ b/grpc/execution/server_test.go @@ -10,6 +10,7 @@ import ( "crypto/sha256" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" @@ -646,6 +647,146 @@ func TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlock(t *testing.T) { } } +func TestNewExecutionServiceServerV1Alpha2_StreamBundles(t *testing.T) { + ethservice, serviceV1Alpha1 := setupExecutionService(t, 10) + + // call genesis info + genesisInfo, err := serviceV1Alpha1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{}) + require.Nil(t, err, "GetGenesisInfo failed") + require.NotNil(t, genesisInfo, "GenesisInfo is nil") + + // call get commitment state + commitmentState, err := serviceV1Alpha1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) + require.Nil(t, err, "GetCommitmentState failed") + require.NotNil(t, commitmentState, "CommitmentState is nil") + + // get previous block hash + previousBlock := ethservice.BlockChain().CurrentSafeBlock() + require.NotNil(t, previousBlock, "Previous block not found") + + // create the optimistic block via the StreamExecuteOptimisticBlock rpc + requestStreams := []*optimsticPb.ExecuteOptimisticBlockStreamRequest{} + sequencerBlockHash := []byte("sequencer_block_hash") + + // create 1 stream item with 5 txs + txs := []*types.Transaction{} + marshalledTxs := []*sequencerblockv1.RollupData{} + for i := 0; i < 5; i++ { + unsignedTx := types.NewTransaction(uint64(i), testToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil) + tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), testKey) + require.Nil(t, err, "Failed to sign tx") + txs = append(txs, tx) + + marshalledTx, err := tx.MarshalBinary() + require.Nil(t, err, "Failed to marshal tx") + marshalledTxs = append(marshalledTxs, &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{SequencedData: marshalledTx}, + }) + } + + req := optimsticPb.ExecuteOptimisticBlockStreamRequest{BaseBlock: &optimsticPb.BaseBlock{ + SequencerBlockHash: sequencerBlockHash, + Transactions: marshalledTxs, + Timestamp: &timestamppb.Timestamp{ + Seconds: int64(previousBlock.Time + 2), + }, + }} + + requestStreams = append(requestStreams, &req) + + mockBidirectionalStream := &MockBidirectionalStreaming[optimsticPb.ExecuteOptimisticBlockStreamRequest, optimsticPb.ExecuteOptimisticBlockStreamResponse]{ + requestStream: requestStreams, + accumulatedResponses: []*optimsticPb.ExecuteOptimisticBlockStreamResponse{}, + requestCounter: 0, + } + + errorCh := make(chan error) + go func(errorCh chan error) { + errorCh <-
serviceV1Alpha1.ExecuteOptimisticBlockStream(mockBidirectionalStream) + }(errorCh) + + select { + // stream either errors out of gets closed + case err := <-errorCh: + require.Nil(t, err, "StreamExecuteOptimisticBlock failed") + } + + require.Len(t, mockBidirectionalStream.accumulatedResponses, 1, "Number of responses should match the number of requests") + accumulatedResponse := mockBidirectionalStream.accumulatedResponses[0] + + currentOptimisticBlock := ethservice.BlockChain().CurrentOptimisticBlock() + require.NotNil(t, currentOptimisticBlock, "Optimistic block is not set") + require.True(t, bytes.Equal(accumulatedResponse.GetBlock().Hash, currentOptimisticBlock.Hash().Bytes()), "Optimistic block hashes do not match") + require.True(t, bytes.Equal(accumulatedResponse.GetBlock().ParentBlockHash, currentOptimisticBlock.ParentHash.Bytes()), "Optimistic block parent hashes do not match") + require.Equal(t, uint64(accumulatedResponse.GetBlock().Number), currentOptimisticBlock.Number.Uint64(), "Optimistic block numbers do not match") + + // assert mempool is cleared + astriaOrdered := ethservice.TxPool().AstriaOrdered() + require.Equal(t, 0, astriaOrdered.Len(), "AstriaOrdered should be empty") + + pendingTxs := ethservice.TxPool().Pending(txpool.PendingFilter{ + OnlyPlainTxs: true, + }) + require.Equal(t, len(pendingTxs), 0, "Mempool should be empty") + + mockServerSideStreaming := MockServerSideStreaming[optimsticPb.GetBundleStreamResponse]{ + sentResponses: []*optimsticPb.GetBundleStreamResponse{}, + } + + errorCh = make(chan error) + go func() { + errorCh <- serviceV1Alpha1.StreamBundles(&mockServerSideStreaming) + }() + + // optimistic block is created, we can now add txs and check if they get streamed + // create 5 txs + txs = []*types.Transaction{} + for i := 5; i < 10; i++ { + unsignedTx := types.NewTransaction(uint64(i), testToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil) + tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), testKey) + require.Nil(t, err, "Failed to sign tx") + txs = append(txs, tx) + + marshalledTx, err := tx.MarshalBinary() + require.Nil(t, err, "Failed to marshal tx") + marshalledTxs = append(marshalledTxs, &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{SequencedData: marshalledTx}, + }) + } + + txErrors := ethservice.TxPool().Add(txs, true, false) + for _, txErr := range txErrors { + require.Nil(t, txErr, "Failed to add tx to mempool") + } + + pendingTxs = ethservice.TxPool().Pending(txpool.PendingFilter{ + OnlyPlainTxs: true, + }) + require.Len(t, pendingTxs, 1, "Mempool should have 1 tx") + addrTxs := pendingTxs[testAddr] + require.Len(t, addrTxs, 5, "Mempool should have 5 txs for test address") + + time.Sleep(5 * time.Second) + + // close the mempool to error the method out + err = ethservice.TxPool().Close() + require.Nil(t, err, "Failed to close mempool") + + select { + case err := <-errorCh: + require.ErrorContains(t, err, "error waiting for pending transactions") + } + + require.Len(t, mockServerSideStreaming.sentResponses, 5, "Number of responses should match the number of requests") + + for _, resp := range mockServerSideStreaming.sentResponses { + bundle := resp.GetBundle() + require.Len(t, bundle.Transactions, 1, "Bundle should have 1 tx") + require.True(t, bytes.Equal(bundle.PrevRollupBlockHash, currentOptimisticBlock.Hash().Bytes()), "PrevRollupBlockHash should match the current optimistic block hash") + require.True(t, 
bytes.Equal(bundle.BaseSequencerBlockHash, *serviceV1Alpha1.currentOptimisticSequencerBlock.Load()), "BaseSequencerBlockHash should match the current optimistic sequencer block hash") + } +} + func TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlockStream(t *testing.T) { ethservice, serviceV1Alpha1 := setupExecutionService(t, 10) @@ -713,7 +854,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlockStream(t *testing. }(errorCh) select { - // stream either errors out of gets closed + // the stream will either errors out or gets closed case err := <-errorCh: require.Nil(t, err, "StreamExecuteOptimisticBlock failed") } From 35f3a6c8f519612f470607374f948647a45e2aa6 Mon Sep 17 00:00:00 2001 From: Bharath Date: Tue, 1 Oct 2024 20:10:18 +0200 Subject: [PATCH 26/79] only send the effective tip as part of the fee --- grpc/execution/server.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/grpc/execution/server.go b/grpc/execution/server.go index a8e2d699b..478809bcd 100644 --- a/grpc/execution/server.go +++ b/grpc/execution/server.go @@ -264,9 +264,6 @@ func (s *ExecutionServiceServerV1) StreamBundles(stream optimisticGrpc.BundleSer totalCost := big.NewInt(0) effectiveTip := cmath.BigMin(pendingTx.GasTipCap(), new(big.Int).Sub(pendingTx.GasFeeCap(), optimisticBlock.BaseFee)) totalCost.Add(totalCost, effectiveTip) - baseFee := new(big.Int).SetUint64(pendingTx.Gas()) - baseFee.Mul(baseFee, optimisticBlock.BaseFee) - totalCost.Add(totalCost, baseFee) marshalledTxs := [][]byte{} marshalledTx, err := pendingTx.MarshalBinary() From 4b612d6d409b9b0fa067add6af52bb239eb9f633 Mon Sep 17 00:00:00 2001 From: Bharath Date: Wed, 16 Oct 2024 23:37:38 +0530 Subject: [PATCH 27/79] minor test updates --- grpc/execution/server_test.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/grpc/execution/server_test.go b/grpc/execution/server_test.go index 689143f85..dc8a0b668 100644 --- a/grpc/execution/server_test.go +++ b/grpc/execution/server_test.go @@ -759,13 +759,11 @@ func TestNewExecutionServiceServerV1Alpha2_StreamBundles(t *testing.T) { require.Nil(t, txErr, "Failed to add tx to mempool") } - pendingTxs = ethservice.TxPool().Pending(txpool.PendingFilter{ - OnlyPlainTxs: true, - }) - require.Len(t, pendingTxs, 1, "Mempool should have 1 tx") - addrTxs := pendingTxs[testAddr] - require.Len(t, addrTxs, 5, "Mempool should have 5 txs for test address") + pending, queued := ethservice.TxPool().Stats() + require.Equal(t, pending, 5, "Mempool should have 5 pending txs") + require.Equal(t, queued, 0, "Mempool should have 0 queued txs") + // give some time for the txs to stream time.Sleep(5 * time.Second) // close the mempool to error the method out From 5acc678d8797f52dea3a101fb98c526388e74879 Mon Sep 17 00:00:00 2001 From: Bharath Date: Tue, 5 Nov 2024 11:37:14 +0530 Subject: [PATCH 28/79] rename grpc methods --- grpc/execution/server.go | 2 +- grpc/execution/server_test.go | 12 +++++------- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/grpc/execution/server.go b/grpc/execution/server.go index 478809bcd..0ad72f07c 100644 --- a/grpc/execution/server.go +++ b/grpc/execution/server.go @@ -246,7 +246,7 @@ func protoU128ToBigInt(u128 *primitivev1.Uint128) *big.Int { return lo.Add(lo, hi) } -func (s *ExecutionServiceServerV1) StreamBundles(stream optimisticGrpc.BundleService_GetBundleStreamServer) error { +func (s *ExecutionServiceServerV1) GetBundleStream(stream optimisticGrpc.BundleService_GetBundleStreamServer) error { pendingTxEventCh := make(chan 
core.NewTxsEvent) pendingTxEvent := s.eth.TxPool().SubscribeTransactions(pendingTxEventCh, false) defer pendingTxEvent.Unsubscribe() diff --git a/grpc/execution/server_test.go b/grpc/execution/server_test.go index dc8a0b668..79d2f5889 100644 --- a/grpc/execution/server_test.go +++ b/grpc/execution/server_test.go @@ -10,7 +10,6 @@ import ( "crypto/sha256" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/params" @@ -724,10 +723,9 @@ func TestNewExecutionServiceServerV1Alpha2_StreamBundles(t *testing.T) { astriaOrdered := ethservice.TxPool().AstriaOrdered() require.Equal(t, 0, astriaOrdered.Len(), "AstriaOrdered should be empty") - pendingTxs := ethservice.TxPool().Pending(txpool.PendingFilter{ - OnlyPlainTxs: true, - }) - require.Equal(t, len(pendingTxs), 0, "Mempool should be empty") + pending, queued := ethservice.TxPool().Stats() + require.Equal(t, pending, 0, "Mempool should have 0 pending txs") + require.Equal(t, queued, 0, "Mempool should have 0 queued txs") mockServerSideStreaming := MockServerSideStreaming[optimsticPb.GetBundleStreamResponse]{ sentResponses: []*optimsticPb.GetBundleStreamResponse{}, @@ -735,7 +733,7 @@ func TestNewExecutionServiceServerV1Alpha2_StreamBundles(t *testing.T) { errorCh = make(chan error) go func() { - errorCh <- serviceV1Alpha1.StreamBundles(&mockServerSideStreaming) + errorCh <- serviceV1Alpha1.GetBundleStream(&mockServerSideStreaming) }() // optimistic block is created, we can now add txs and check if they get streamed @@ -759,7 +757,7 @@ func TestNewExecutionServiceServerV1Alpha2_StreamBundles(t *testing.T) { require.Nil(t, txErr, "Failed to add tx to mempool") } - pending, queued := ethservice.TxPool().Stats() + pending, queued = ethservice.TxPool().Stats() require.Equal(t, pending, 5, "Mempool should have 5 pending txs") require.Equal(t, queued, 0, "Mempool should have 0 queued txs") From e57148b671c1e11698c9dbdf04ca30e93387350c Mon Sep 17 00:00:00 2001 From: Bharath Date: Mon, 11 Nov 2024 13:39:24 +0530 Subject: [PATCH 29/79] close the bundle stream when client closes the connection --- grpc/execution/server.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/grpc/execution/server.go b/grpc/execution/server.go index 0ad72f07c..9df777ad0 100644 --- a/grpc/execution/server.go +++ b/grpc/execution/server.go @@ -285,6 +285,9 @@ func (s *ExecutionServiceServerV1) GetBundleStream(stream optimisticGrpc.BundleS case err := <-pendingTxEvent.Err(): return status.Errorf(codes.Internal, "error waiting for pending transactions: %v", err) + case <-stream.Context().Done(): + log.Debug("GetBundleStream stream closed by client with error", "err", stream.Context().Err()) + return stream.Context().Err() } } } From e4cf56904d877f4d02ae697a118eb413fa962704 Mon Sep 17 00:00:00 2001 From: Bharath Date: Mon, 2 Dec 2024 17:57:36 +0530 Subject: [PATCH 30/79] allow only 1 client to connect to the bundle stream --- grpc/execution/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/grpc/execution/server.go b/grpc/execution/server.go index 9df777ad0..2826c95d2 100644 --- a/grpc/execution/server.go +++ b/grpc/execution/server.go @@ -246,7 +246,7 @@ func protoU128ToBigInt(u128 *primitivev1.Uint128) *big.Int { return lo.Add(lo, hi) } -func (s *ExecutionServiceServerV1) GetBundleStream(stream optimisticGrpc.BundleService_GetBundleStreamServer) error { +func (s 
*ExecutionServiceServerV1) GetBundleStream(stream optimisticGrpc.BundleService_GetBundleStreamServer) error {: pendingTxEventCh := make(chan core.NewTxsEvent) pendingTxEvent := s.eth.TxPool().SubscribeTransactions(pendingTxEventCh, false) defer pendingTxEvent.Unsubscribe() From f795b75ad7c4d44c3e2725f87ccb867f61f21197 Mon Sep 17 00:00:00 2001 From: Bharath Date: Mon, 2 Dec 2024 17:57:52 +0530 Subject: [PATCH 31/79] fix minor error --- grpc/execution/server.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/grpc/execution/server.go b/grpc/execution/server.go index 2826c95d2..9df777ad0 100644 --- a/grpc/execution/server.go +++ b/grpc/execution/server.go @@ -246,7 +246,7 @@ func protoU128ToBigInt(u128 *primitivev1.Uint128) *big.Int { return lo.Add(lo, hi) } -func (s *ExecutionServiceServerV1) GetBundleStream(stream optimisticGrpc.BundleService_GetBundleStreamServer) error {: +func (s *ExecutionServiceServerV1) GetBundleStream(stream optimisticGrpc.BundleService_GetBundleStreamServer) error { pendingTxEventCh := make(chan core.NewTxsEvent) pendingTxEvent := s.eth.TxPool().SubscribeTransactions(pendingTxEventCh, false) defer pendingTxEvent.Unsubscribe() From 0c02e8ccd1e043e354fb638a648c42b420524057 Mon Sep 17 00:00:00 2001 From: Bharath Date: Fri, 11 Oct 2024 15:41:02 +0200 Subject: [PATCH 32/79] fetch the next fee recipient under the block execution lock --- grpc/execution/server.go | 1 + 1 file changed, 1 insertion(+) diff --git a/grpc/execution/server.go b/grpc/execution/server.go index 9df777ad0..6452089bd 100644 --- a/grpc/execution/server.go +++ b/grpc/execution/server.go @@ -56,6 +56,7 @@ type ExecutionServiceServerV1 struct { bridgeAddresses map[string]*params.AstriaBridgeAddressConfig // astria bridge addess to config for that bridge account bridgeAllowedAssets map[string]struct{} // a set of allowed asset IDs structs are left empty + // TODO: bharath - we could make this an atomic pointer??? nextFeeRecipient common.Address // Fee recipient for the next block currentOptimisticSequencerBlock atomic.Pointer[[]byte] From a5fb8a52d0ed5bf364f0373d5331723ae84a4737 Mon Sep 17 00:00:00 2001 From: Bharath Date: Thu, 17 Oct 2024 00:03:05 +0530 Subject: [PATCH 33/79] validate txs before optimistically executing them --- core/txpool/blobpool/blobpool.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/core/txpool/blobpool/blobpool.go b/core/txpool/blobpool/blobpool.go index 30b507f08..b47621650 100644 --- a/core/txpool/blobpool/blobpool.go +++ b/core/txpool/blobpool/blobpool.go @@ -341,6 +341,8 @@ func (p *BlobPool) AstriaExcludedFromBlock() *types.Transactions { return &ty func (p *BlobPool) AstriaOrdered() *types.Transactions { return &types.Transactions{} } func (p *BlobPool) ValidateTx(tx *types.Transaction) error { return nil } +func (p *BlobPool) ValidateTx(tx *types.Transaction) error { return nil } + // Filter returns whether the given transaction can be consumed by the blob pool. 
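The TODO added in PATCH 32 asks whether nextFeeRecipient could become an atomic pointer instead of a field read under the block execution lock. That change is not made in this series; the following is only a hedged sketch of what the swap might look like, with a hypothetical holder type standing in for the server field.

package main

import (
	"fmt"
	"sync/atomic"

	"github.com/ethereum/go-ethereum/common"
)

// feeRecipientHolder is a hypothetical stand-in for the server field: reads
// and writes go through an atomic.Pointer, so callers would no longer need
// the block execution lock just to learn the next fee recipient.
type feeRecipientHolder struct {
	next atomic.Pointer[common.Address]
}

func (h *feeRecipientHolder) set(addr common.Address) { h.next.Store(&addr) }

func (h *feeRecipientHolder) get() common.Address {
	if p := h.next.Load(); p != nil {
		return *p
	}
	return common.Address{}
}

func main() {
	var h feeRecipientHolder
	h.set(common.HexToAddress("0x0000000000000000000000000000000000000001"))
	fmt.Println(h.get())
}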
func (p *BlobPool) Filter(tx *types.Transaction) bool { return tx.Type() == types.BlobTxType From a114f0df462ee6989ad538d01c5a5fe82c50f47c Mon Sep 17 00:00:00 2001 From: Bharath Date: Wed, 16 Oct 2024 16:24:29 +0530 Subject: [PATCH 34/79] support uds endpoints for auctioneer --- cmd/utils/flags.go | 9 +++++++++ node/config.go | 21 ++++++++++++++------- node/config_test.go | 2 +- node/defaults.go | 5 +++-- node/grpcstack.go | 33 +++++++++++++++++++++++++-------- node/node.go | 11 ++++++++--- 6 files changed, 60 insertions(+), 21 deletions(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index ebe359561..11fcaf016 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -768,6 +768,12 @@ var ( Value: node.DefaultGRPCPort, Category: flags.APICategory, } + GRPCUdsFlag = &cli.StringFlag{ + Name: "grpc.uds", + Usage: "gRPC server UDS socket", + Value: node.DefaultGRPCUdsSocket, + Category: flags.APICategory, + } // Network Settings MaxPeersFlag = &cli.IntFlag{ @@ -1216,6 +1222,9 @@ func setGRPC(ctx *cli.Context, cfg *node.Config) { if ctx.IsSet(GRPCPortFlag.Name) { cfg.GRPCPort = ctx.Int(GRPCPortFlag.Name) } + if ctx.IsSet(GRPCUdsFlag.Name) { + cfg.GRPCUds = ctx.String(GRPCUdsFlag.Name) + } } } diff --git a/node/config.go b/node/config.go index d1e29baa0..87013c002 100644 --- a/node/config.go +++ b/node/config.go @@ -195,6 +195,8 @@ type Config struct { GRPCHost string `toml:",omitempty"` // GRPCPort is the TCP port number on which to start the gRPC server. GRPCPort int `toml:",omitempty"` + // GRPCUds is the Unix domain socket path on which to start the gRPC server. + GRPCUds string `toml:",omitempty"` // Logger is a custom logger to use with the p2p.Server. Logger log.Logger `toml:",omitempty"` @@ -273,30 +275,35 @@ func (c *Config) HTTPEndpoint() string { return net.JoinHostPort(c.HTTPHost, fmt.Sprintf("%d", c.HTTPPort)) } -// GRPCEndpoint resolves a gRPC endpoint based on the configured host interface +// GRPCTcpEndpoint resolves a gRPC TCP endpoint based on the configured host interface // and port parameters. -func (c *Config) GRPCEndpoint() string { +func (c *Config) GRPCTcpEndpoint() string { if c.GRPCHost == "" { return "" } return fmt.Sprintf("%s:%d", c.GRPCHost, c.GRPCPort) } -// DefaultHTTPEndpoint returns the HTTP endpoint used by default. +// GRPCUdsEndpoint resolves a gRPC Unix domain socket endpoint based on the configured path. +func (c *Config) GRPCUdsEndpoint() string { + return c.GRPCUds +} + +// DefaultHTTPEndpoint returns the HTTP tcpEndpoint used by default. func DefaultHTTPEndpoint() string { config := &Config{HTTPHost: DefaultHTTPHost, HTTPPort: DefaultHTTPPort, AuthPort: DefaultAuthPort} return config.HTTPEndpoint() } -// DefaultGRPCEndpoint returns the gRPC endpoint used by default. +// DefaultGRPCEndpoint returns the gRPC tcpEndpoint used by default. // NOTE - implemented this to be consistent with DefaultHTTPEndpoint, but // neither are ever used func DefaultGRPCEndpoint() string { config := &Config{GRPCHost: DefaultGRPCHost, GRPCPort: DefaultGRPCPort} - return config.GRPCEndpoint() + return config.GRPCTcpEndpoint() } -// WSEndpoint resolves a websocket endpoint based on the configured host interface +// WSEndpoint resolves a websocket tcpEndpoint based on the configured host interface // and port parameters. func (c *Config) WSEndpoint() string { if c.WSHost == "" { @@ -305,7 +312,7 @@ func (c *Config) WSEndpoint() string { return net.JoinHostPort(c.WSHost, fmt.Sprintf("%d", c.WSPort)) } -// DefaultWSEndpoint returns the websocket endpoint used by default. 
+// DefaultWSEndpoint returns the websocket tcpEndpoint used by default. func DefaultWSEndpoint() string { config := &Config{WSHost: DefaultWSHost, WSPort: DefaultWSPort} return config.WSEndpoint() diff --git a/node/config_test.go b/node/config_test.go index e8af8ddcd..9cfda04d6 100644 --- a/node/config_test.go +++ b/node/config_test.go @@ -94,7 +94,7 @@ func TestIPCPathResolution(t *testing.T) { // Only run when platform/test match if (runtime.GOOS == "windows") == test.Windows { if endpoint := (&Config{DataDir: test.DataDir, IPCPath: test.IPCPath}).IPCEndpoint(); endpoint != test.Endpoint { - t.Errorf("test %d: IPC endpoint mismatch: have %s, want %s", i, endpoint, test.Endpoint) + t.Errorf("test %d: IPC tcpEndpoint mismatch: have %s, want %s", i, endpoint, test.Endpoint) } } } diff --git a/node/defaults.go b/node/defaults.go index 326ed2373..0c0d2c935 100644 --- a/node/defaults.go +++ b/node/defaults.go @@ -35,8 +35,9 @@ const ( DefaultAuthHost = "localhost" // Default host interface for the authenticated apis DefaultAuthPort = 8551 // Default port for the authenticated apis // grpc - DefaultGRPCHost = "[::1]" // Default host interface for the gRPC server - DefaultGRPCPort = 50051 // Default port for the gRPC server + DefaultGRPCHost = "[::1]" // Default host interface for the gRPC server for the execution api + DefaultGRPCPort = 50051 // Default port for the gRPC server for the execution api + DefaultGRPCUdsSocket = "/tmp/auctioneer.sock" // Default UDS socket for the gRPC auctioneer streams ) const ( diff --git a/node/grpcstack.go b/node/grpcstack.go index 86ebc8b5f..5619efde7 100644 --- a/node/grpcstack.go +++ b/node/grpcstack.go @@ -2,6 +2,7 @@ package node import ( "net" + "os" "sync" astriaGrpc "buf.build/gen/go/astria/execution-apis/grpc/go/astria/execution/v1/executionv1grpc" @@ -14,7 +15,8 @@ import ( type GRPCServerHandler struct { mu sync.Mutex - endpoint string + tcpEndpoint string + udsEndpoint string server *grpc.Server executionServiceServerV1a2 *astriaGrpc.ExecutionServiceServer } @@ -25,10 +27,11 @@ type GRPCServerHandler struct { func NewGRPCServerHandler(node *Node, execServ astriaGrpc.ExecutionServiceServer, cfg *Config) error { server := grpc.NewServer() - log.Info("gRPC server enabled", "endpoint", cfg.GRPCEndpoint()) + log.Info("gRPC server enabled", "tcpEndpoint", cfg.GRPCTcpEndpoint(), "udsEndpoint", cfg.GRPCUdsEndpoint()) serverHandler := &GRPCServerHandler{ - endpoint: cfg.GRPCEndpoint(), + tcpEndpoint: cfg.GRPCTcpEndpoint(), + udsEndpoint: cfg.GRPCUdsEndpoint(), server: server, executionServiceServerV1a2: &execServ, } @@ -44,17 +47,31 @@ func (handler *GRPCServerHandler) Start() error { handler.mu.Lock() defer handler.mu.Unlock() - if handler.endpoint == "" { + if handler.tcpEndpoint == "" { + return nil + } + if handler.udsEndpoint == "" { return nil } // Start the gRPC server - lis, err := net.Listen("tcp", handler.endpoint) + tcpLis, err := net.Listen("tcp", handler.tcpEndpoint) if err != nil { return err } - go handler.server.Serve(lis) - log.Info("gRPC server started", "endpoint", handler.endpoint) + + // Remove any existing socket file + if err := os.RemoveAll(handler.udsEndpoint); err != nil { + return err + } + udsLis, err := net.Listen("unix", handler.udsEndpoint) + if err != nil { + return err + } + + go handler.server.Serve(tcpLis) + go handler.server.Serve(udsLis) + log.Info("gRPC server started", "tcpEndpoint", handler.tcpEndpoint, "udsEndpoint", handler.udsEndpoint) return nil } @@ -64,6 +81,6 @@ func (handler *GRPCServerHandler) Stop() error { 
defer handler.mu.Unlock() handler.server.GracefulStop() - log.Info("gRPC server stopped", "endpoint", handler.endpoint) + log.Info("gRPC server stopped", "tcpEndpoint", handler.tcpEndpoint, "udsEndpoint", handler.udsEndpoint) return nil } diff --git a/node/node.go b/node/node.go index 896763033..02a91a8ba 100644 --- a/node/node.go +++ b/node/node.go @@ -724,9 +724,14 @@ func (n *Node) HTTPEndpoint() string { return "http://" + n.http.listenAddr() } -// GRPCENDPOINT returns the URL of the GRPC server. -func (n *Node) GRPCEndpoint() string { - return "http://" + n.grpcServerHandler.endpoint +// GRPCTcpEndpoint returns the URL of the GRPC server. +func (n *Node) GRPCTcpEndpoint() string { + return "http://" + n.grpcServerHandler.tcpEndpoint +} + +// GRPCUdsEndpoint returns the URL of the GRPC server UDS endpoint +func (n *Node) GRPCUdsEndpoint() string { + return n.grpcServerHandler.udsEndpoint } // WSEndpoint returns the current JSON-RPC over WebSocket endpoint. From edfddc92dac9e931c41a4d30b43b0fc3d8cd48b3 Mon Sep 17 00:00:00 2001 From: Bharath Date: Thu, 24 Oct 2024 21:02:32 +0530 Subject: [PATCH 35/79] remove duplicate code --- core/txpool/blobpool/blobpool.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/core/txpool/blobpool/blobpool.go b/core/txpool/blobpool/blobpool.go index b47621650..30b507f08 100644 --- a/core/txpool/blobpool/blobpool.go +++ b/core/txpool/blobpool/blobpool.go @@ -341,8 +341,6 @@ func (p *BlobPool) AstriaExcludedFromBlock() *types.Transactions { return &ty func (p *BlobPool) AstriaOrdered() *types.Transactions { return &types.Transactions{} } func (p *BlobPool) ValidateTx(tx *types.Transaction) error { return nil } -func (p *BlobPool) ValidateTx(tx *types.Transaction) error { return nil } - // Filter returns whether the given transaction can be consumed by the blob pool. 
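With the handler now serving the same *grpc.Server on both a TCP listener and a Unix domain socket, a local client such as the auctioneer can reach it through grpc-go's unix target scheme. A hedged sketch of the client side, assuming the default /tmp/auctioneer.sock path from node/defaults.go, a plaintext local transport, and a grpc-go version that provides grpc.NewClient (older versions would use grpc.Dial).

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// "unix://" targets are handled by grpc-go's built-in unix resolver;
	// the path below is DefaultGRPCUdsSocket from node/defaults.go.
	conn, err := grpc.NewClient("unix:///tmp/auctioneer.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dialing UDS endpoint: %v", err)
	}
	defer conn.Close()

	// conn can now back the generated BundleService and
	// OptimisticExecutionService client stubs.
	_ = conn
}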
func (p *BlobPool) Filter(tx *types.Transaction) bool { return tx.Type() == types.BlobTxType From 12e3ab2ed06411bb49811e15e81fcd9dd9f6a02e Mon Sep 17 00:00:00 2001 From: Bharath Date: Mon, 4 Nov 2024 12:25:13 +0530 Subject: [PATCH 36/79] add uds flag to options --- cmd/geth/main.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index 162655190..f8c40bc58 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -191,6 +191,7 @@ var ( utils.GRPCEnabledFlag, utils.GRPCHostFlag, utils.GRPCPortFlag, + utils.GRPCUdsFlag, } metricsFlags = []cli.Flag{ From 14c097e08dbbbf7673b904e22f9d0bcfa63ee8b2 Mon Sep 17 00:00:00 2001 From: Bharath Date: Mon, 2 Dec 2024 18:05:59 +0530 Subject: [PATCH 37/79] separate out execution api services and optimistic execution api services --- cmd/geth/config.go | 17 +- cmd/utils/flags.go | 7 +- core/vm/memory.go | 2 +- grpc/execution/server.go | 469 +++++------------- grpc/execution/server_test.go | 452 ++--------------- grpc/execution/test_setup.go | 11 + grpc/execution/validation.go | 116 +---- .../mock_grpc_stream.go | 2 +- grpc/optimistic/server.go | 292 +++++++++++ grpc/optimistic/server_test.go | 439 ++++++++++++++++ grpc/optimistic/test_setup.go | 12 + grpc/optimistic/validation.go | 17 + grpc/shared/container.go | 166 +++++++ grpc/shared/test_setup.go | 32 ++ grpc/{execution => shared}/test_utils.go | 44 +- grpc/shared/validation.go | 114 +++++ grpc/{execution => shared}/validation_test.go | 22 +- node/grpcstack.go | 26 +- 18 files changed, 1294 insertions(+), 946 deletions(-) create mode 100644 grpc/execution/test_setup.go rename grpc/{execution => optimistic}/mock_grpc_stream.go (99%) create mode 100644 grpc/optimistic/server.go create mode 100644 grpc/optimistic/server_test.go create mode 100644 grpc/optimistic/test_setup.go create mode 100644 grpc/optimistic/validation.go create mode 100644 grpc/shared/container.go create mode 100644 grpc/shared/test_setup.go rename grpc/{execution => shared}/test_utils.go (69%) create mode 100644 grpc/shared/validation.go rename grpc/{execution => shared}/validation_test.go (90%) diff --git a/cmd/geth/config.go b/cmd/geth/config.go index fa122f8cc..a9eb24653 100644 --- a/cmd/geth/config.go +++ b/cmd/geth/config.go @@ -21,6 +21,8 @@ import ( "errors" "fmt" "github.com/ethereum/go-ethereum/eth/catalyst" + "github.com/ethereum/go-ethereum/grpc/optimistic" + "github.com/ethereum/go-ethereum/grpc/shared" "os" "reflect" "runtime" @@ -206,11 +208,24 @@ func makeFullNode(ctx *cli.Context) *node.Node { // Configure gRPC if requested. if ctx.IsSet(utils.GRPCEnabledFlag.Name) { +<<<<<<< HEAD serviceV1, err := execution.NewExecutionServiceServerV1(eth) +======= + sharedService, err := shared.NewSharedServiceContainer(eth) +>>>>>>> 21f5aa7f7 (separate out execution api services and optimistic execution api services) if err != nil { - utils.Fatalf("failed to create execution service: %v", err) + utils.Fatalf("failed to create shared service container: %v", err) } +<<<<<<< HEAD utils.RegisterGRPCExecutionService(stack, serviceV1, &cfg.Node) +======= + + serviceV1a2 := execution.NewExecutionServiceServerV1Alpha2(sharedService) + + optimisticServiceV1a1 := optimistic.NewOptimisticServiceV1Alpha(sharedService) + + utils.RegisterGRPCServices(stack, serviceV1a2, optimisticServiceV1a1, optimisticServiceV1a1, &cfg.Node) +>>>>>>> 21f5aa7f7 (separate out execution api services and optimistic execution api services) } // Add the Ethereum Stats daemon if requested. 
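PATCH 37 has a single node-level gRPC handler carry three services: the execution API plus the optimistic-execution and bundle streams. The registration itself is not visible in these hunks, so the sketch below assumes the usual protoc-gen-go-grpc Register<Service>Server helpers from the generated astria packages; treat the exact registration function names as assumptions rather than confirmed API.

package main

import (
	optimisticGrpc "buf.build/gen/go/astria/execution-apis/grpc/go/astria/bundle/v1alpha1/bundlev1alpha1grpc"
	astriaGrpc "buf.build/gen/go/astria/execution-apis/grpc/go/astria/execution/v1/executionv1grpc"
	"google.golang.org/grpc"
)

// registerAll wires every service onto one *grpc.Server, which can then be
// served on the TCP and UDS listeners alike.
func registerAll(
	server *grpc.Server,
	execServ astriaGrpc.ExecutionServiceServer,
	optimisticServ optimisticGrpc.OptimisticExecutionServiceServer,
	bundleServ optimisticGrpc.BundleServiceServer,
) {
	astriaGrpc.RegisterExecutionServiceServer(server, execServ)
	optimisticGrpc.RegisterOptimisticExecutionServiceServer(server, optimisticServ)
	optimisticGrpc.RegisterBundleServiceServer(server, bundleServ)
}

func main() {
	_ = registerAll // illustrative only; the real wiring lives in node/grpcstack.go
}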
diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 11fcaf016..188a71184 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -18,6 +18,7 @@ package utils import ( + optimisticGrpc "buf.build/gen/go/astria/execution-apis/grpc/go/astria/bundle/v1alpha1/bundlev1alpha1grpc" "context" "crypto/ecdsa" "encoding/hex" @@ -1996,10 +1997,10 @@ func RegisterGraphQLService(stack *node.Node, backend ethapi.Backend, filterSyst } } -// RegisterGRPCExecutionService adds the gRPC API to the node. +// RegisterGRPCServices adds the gRPC API to the node. // It was done this way so that our grpc execution server can access the ethapi.Backend -func RegisterGRPCExecutionService(stack *node.Node, execServ astriaGrpc.ExecutionServiceServer, cfg *node.Config) { - if err := node.NewGRPCServerHandler(stack, execServ, cfg); err != nil { +func RegisterGRPCServices(stack *node.Node, execServ astriaGrpc.ExecutionServiceServer, optimisticExecutionServ optimisticGrpc.OptimisticExecutionServiceServer, bundleStreamingServ optimisticGrpc.BundleServiceServer, cfg *node.Config) { + if err := node.NewGRPCServerHandler(stack, execServ, optimisticExecutionServ, bundleStreamingServ, cfg); err != nil { Fatalf("Failed to register the gRPC service: %v", err) } } diff --git a/core/vm/memory.go b/core/vm/memory.go index e0202fd7c..3ab292000 100644 --- a/core/vm/memory.go +++ b/core/vm/memory.go @@ -34,7 +34,7 @@ func NewMemory() *Memory { // Set sets offset + size to value func (m *Memory) Set(offset, size uint64, value []byte) { // It's possible the offset is greater than 0 and size equals 0. This is because - // the calcMemSize (common.go) could potentially return 0 when size is zero (NO-OP) + // the calcMemSize (container.go) could potentially return 0 when size is zero (NO-OP) if size > 0 { // length of store may never be less than offset + size. 
// The store should be resized PRIOR to setting the memory diff --git a/grpc/execution/server.go b/grpc/execution/server.go index 6452089bd..879db740e 100644 --- a/grpc/execution/server.go +++ b/grpc/execution/server.go @@ -7,29 +7,23 @@ package execution import ( "context" "crypto/sha256" - "errors" "fmt" - cmath "github.com/ethereum/go-ethereum/common/math" - "io" - "math/big" + "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/grpc/shared" + "github.com/ethereum/go-ethereum/params" "sync" - "sync/atomic" "time" - optimisticGrpc "buf.build/gen/go/astria/execution-apis/grpc/go/astria/bundle/v1alpha1/bundlev1alpha1grpc" astriaGrpc "buf.build/gen/go/astria/execution-apis/grpc/go/astria/execution/v1/executionv1grpc" - optimsticPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/bundle/v1alpha1" astriaPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/execution/v1" primitivev1 "buf.build/gen/go/astria/primitives/protocolbuffers/go/astria/primitive/v1" "github.com/ethereum/go-ethereum/beacon/engine" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/miner" - "github.com/ethereum/go-ethereum/params" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/timestamppb" @@ -41,150 +35,56 @@ type ExecutionServiceServerV1 struct { // NOTE - from the generated code: All implementations must embed // UnimplementedExecutionServiceServer for forward compatibility astriaGrpc.UnimplementedExecutionServiceServer - optimisticGrpc.UnimplementedOptimisticExecutionServiceServer - optimisticGrpc.UnimplementedBundleServiceServer - - eth *eth.Ethereum - bc *core.BlockChain - - commitmentUpdateLock sync.Mutex // Lock for the forkChoiceUpdated method - blockExecutionLock sync.Mutex // Lock for the NewPayload method - - genesisInfoCalled bool - getCommitmentStateCalled bool - - bridgeAddresses map[string]*params.AstriaBridgeAddressConfig // astria bridge addess to config for that bridge account - bridgeAllowedAssets map[string]struct{} // a set of allowed asset IDs structs are left empty - - // TODO: bharath - we could make this an atomic pointer??? 
- nextFeeRecipient common.Address // Fee recipient for the next block - currentOptimisticSequencerBlock atomic.Pointer[[]byte] + sharedServiceContainer *shared.SharedServiceContainer } var ( - getGenesisInfoRequestCount = metrics.GetOrRegisterCounter("astria/execution/get_genesis_info_requests", nil) - getGenesisInfoSuccessCount = metrics.GetOrRegisterCounter("astria/execution/get_genesis_info_success", nil) - getBlockRequestCount = metrics.GetOrRegisterCounter("astria/execution/get_block_requests", nil) - getBlockSuccessCount = metrics.GetOrRegisterCounter("astria/execution/get_block_success", nil) - batchGetBlockRequestCount = metrics.GetOrRegisterCounter("astria/execution/batch_get_block_requests", nil) - batchGetBlockSuccessCount = metrics.GetOrRegisterCounter("astria/execution/batch_get_block_success", nil) - executeBlockRequestCount = metrics.GetOrRegisterCounter("astria/execution/execute_block_requests", nil) - executeBlockSuccessCount = metrics.GetOrRegisterCounter("astria/execution/execute_block_success", nil) - executeOptimisticBlockRequestCount = metrics.GetOrRegisterCounter("astria/execution/execute_optimistic_block_requests", nil) - executeOptimisticBlockSuccessCount = metrics.GetOrRegisterCounter("astria/execution/execute_optimistic_block_success", nil) - getCommitmentStateRequestCount = metrics.GetOrRegisterCounter("astria/execution/get_commitment_state_requests", nil) - getCommitmentStateSuccessCount = metrics.GetOrRegisterCounter("astria/execution/get_commitment_state_success", nil) - updateCommitmentStateRequestCount = metrics.GetOrRegisterCounter("astria/execution/update_commitment_state_requests", nil) - updateCommitmentStateSuccessCount = metrics.GetOrRegisterCounter("astria/execution/update_commitment_state_success", nil) + getGenesisInfoRequestCount = metrics.GetOrRegisterCounter("astria/execution/get_genesis_info_requests", nil) + getGenesisInfoSuccessCount = metrics.GetOrRegisterCounter("astria/execution/get_genesis_info_success", nil) + getBlockRequestCount = metrics.GetOrRegisterCounter("astria/execution/get_block_requests", nil) + getBlockSuccessCount = metrics.GetOrRegisterCounter("astria/execution/get_block_success", nil) + batchGetBlockRequestCount = metrics.GetOrRegisterCounter("astria/execution/batch_get_block_requests", nil) + batchGetBlockSuccessCount = metrics.GetOrRegisterCounter("astria/execution/batch_get_block_success", nil) + executeBlockRequestCount = metrics.GetOrRegisterCounter("astria/execution/execute_block_requests", nil) + executeBlockSuccessCount = metrics.GetOrRegisterCounter("astria/execution/execute_block_success", nil) + getCommitmentStateRequestCount = metrics.GetOrRegisterCounter("astria/execution/get_commitment_state_requests", nil) + getCommitmentStateSuccessCount = metrics.GetOrRegisterCounter("astria/execution/get_commitment_state_success", nil) + updateCommitmentStateRequestCount = metrics.GetOrRegisterCounter("astria/execution/update_commitment_state_requests", nil) + updateCommitmentStateSuccessCount = metrics.GetOrRegisterCounter("astria/execution/update_commitment_state_success", nil) softCommitmentHeight = metrics.GetOrRegisterGauge("astria/execution/soft_commitment_height", nil) firmCommitmentHeight = metrics.GetOrRegisterGauge("astria/execution/firm_commitment_height", nil) totalExecutedTxCount = metrics.GetOrRegisterCounter("astria/execution/total_executed_tx", nil) - executeBlockTimer = metrics.GetOrRegisterTimer("astria/execution/execute_block_time", nil) - executionOptimisticBlockTimer = 
metrics.GetOrRegisterTimer("astria/execution/execute_optimistic_block_time", nil) - commitmentStateUpdateTimer = metrics.GetOrRegisterTimer("astria/execution/commitment", nil) + executeBlockTimer = metrics.GetOrRegisterTimer("astria/execution/execute_block_time", nil) + commitmentStateUpdateTimer = metrics.GetOrRegisterTimer("astria/execution/commitment", nil) ) -func NewExecutionServiceServerV1(eth *eth.Ethereum) (*ExecutionServiceServerV1, error) { - bc := eth.BlockChain() - - if bc.Config().AstriaRollupName == "" { - return nil, errors.New("rollup name not set") - } - - if bc.Config().AstriaSequencerInitialHeight == 0 { - return nil, errors.New("sequencer initial height not set") - } - - if bc.Config().AstriaCelestiaInitialHeight == 0 { - return nil, errors.New("celestia initial height not set") - } - - if bc.Config().AstriaCelestiaHeightVariance == 0 { - return nil, errors.New("celestia height variance not set") - } - - bridgeAddresses := make(map[string]*params.AstriaBridgeAddressConfig) - bridgeAllowedAssets := make(map[string]struct{}) - if bc.Config().AstriaBridgeAddressConfigs == nil { - log.Warn("bridge addresses not set") - } else { - nativeBridgeSeen := false - for _, cfg := range bc.Config().AstriaBridgeAddressConfigs { - err := cfg.Validate(bc.Config().AstriaSequencerAddressPrefix) - if err != nil { - return nil, fmt.Errorf("invalid bridge address config: %w", err) - } - - if cfg.Erc20Asset == nil { - if nativeBridgeSeen { - return nil, errors.New("only one native bridge address is allowed") - } - nativeBridgeSeen = true - } - - if cfg.Erc20Asset != nil && cfg.SenderAddress == (common.Address{}) { - return nil, errors.New("astria bridge sender address must be set for bridged ERC20 assets") - } - - bridgeCfg := cfg - bridgeAddresses[cfg.BridgeAddress] = &bridgeCfg - bridgeAllowedAssets[cfg.AssetDenom] = struct{}{} - if cfg.Erc20Asset == nil { - log.Info("bridge for sequencer native asset initialized", "bridgeAddress", cfg.BridgeAddress, "assetDenom", cfg.AssetDenom) - } else { - log.Info("bridge for ERC20 asset initialized", "bridgeAddress", cfg.BridgeAddress, "assetDenom", cfg.AssetDenom, "contractAddress", cfg.Erc20Asset.ContractAddress) - } - } +func NewExecutionServiceServerV1Alpha2(sharedServiceContainer *shared.SharedServiceContainer) *ExecutionServiceServerV1 { + execServiceServerV1Alpha2 := &ExecutionServiceServerV1{ + sharedServiceContainer: sharedServiceContainer, } - // To decrease compute cost, we identify the next fee recipient at the start - // and update it as we execute blocks. 
- nextFeeRecipient := common.Address{} - if bc.Config().AstriaFeeCollectors == nil { - log.Warn("fee asset collectors not set, assets will be burned") - } else { - maxHeightCollectorMatch := uint32(0) - nextBlock := uint32(bc.CurrentBlock().Number.Int64()) + 1 - for height, collector := range bc.Config().AstriaFeeCollectors { - if height <= nextBlock && height > maxHeightCollectorMatch { - maxHeightCollectorMatch = height - nextFeeRecipient = collector - } - } - } - - execServiceServerV1Alpha2 := ExecutionServiceServerV1{ - eth: eth, - bc: bc, - bridgeAddresses: bridgeAddresses, - bridgeAllowedAssets: bridgeAllowedAssets, - nextFeeRecipient: nextFeeRecipient, - } - - execServiceServerV1Alpha2.currentOptimisticSequencerBlock.Store(&[]byte{}) - - return &execServiceServerV1Alpha2, nil + return execServiceServerV1Alpha2 } func (s *ExecutionServiceServerV1) GetGenesisInfo(ctx context.Context, req *astriaPb.GetGenesisInfoRequest) (*astriaPb.GenesisInfo, error) { log.Debug("GetGenesisInfo called") getGenesisInfoRequestCount.Inc(1) - rollupHash := sha256.Sum256([]byte(s.bc.Config().AstriaRollupName)) + rollupHash := sha256.Sum256([]byte(s.Bc().Config().AstriaRollupName)) rollupId := primitivev1.RollupId{Inner: rollupHash[:]} res := &astriaPb.GenesisInfo{ RollupId: &rollupId, - SequencerGenesisBlockHeight: s.bc.Config().AstriaSequencerInitialHeight, - CelestiaBlockVariance: s.bc.Config().AstriaCelestiaHeightVariance, + SequencerGenesisBlockHeight: s.Bc().Config().AstriaSequencerInitialHeight, + CelestiaBlockVariance: s.Bc().Config().AstriaCelestiaHeightVariance, } log.Info("GetGenesisInfo completed", "response", res) getGenesisInfoSuccessCount.Inc(1) - s.genesisInfoCalled = true + s.SetGenesisInfoCalled(true) return res, nil } @@ -240,195 +140,6 @@ func (s *ExecutionServiceServerV1) BatchGetBlocks(ctx context.Context, req *astr return res, nil } -func protoU128ToBigInt(u128 *primitivev1.Uint128) *big.Int { - lo := big.NewInt(0).SetUint64(u128.Lo) - hi := big.NewInt(0).SetUint64(u128.Hi) - hi.Lsh(hi, 64) - return lo.Add(lo, hi) -} - -func (s *ExecutionServiceServerV1) GetBundleStream(stream optimisticGrpc.BundleService_GetBundleStreamServer) error { - pendingTxEventCh := make(chan core.NewTxsEvent) - pendingTxEvent := s.eth.TxPool().SubscribeTransactions(pendingTxEventCh, false) - defer pendingTxEvent.Unsubscribe() - - for { - select { - case pendingTxs := <-pendingTxEventCh: - // get the optimistic block - // this is an in-memory read, so there shouldn't be a lot of concerns on speed - optimisticBlock := s.eth.BlockChain().CurrentOptimisticBlock() - - for _, pendingTx := range pendingTxs.Txs { - bundle := optimsticPb.Bundle{} - - totalCost := big.NewInt(0) - effectiveTip := cmath.BigMin(pendingTx.GasTipCap(), new(big.Int).Sub(pendingTx.GasFeeCap(), optimisticBlock.BaseFee)) - totalCost.Add(totalCost, effectiveTip) - - marshalledTxs := [][]byte{} - marshalledTx, err := pendingTx.MarshalBinary() - if err != nil { - return status.Errorf(codes.Internal, "error marshalling tx: %v", err) - } - marshalledTxs = append(marshalledTxs, marshalledTx) - - bundle.Fee = totalCost.Uint64() - bundle.Transactions = marshalledTxs - bundle.BaseSequencerBlockHash = *s.currentOptimisticSequencerBlock.Load() - bundle.PrevRollupBlockHash = optimisticBlock.Hash().Bytes() - - err = stream.Send(&optimsticPb.GetBundleStreamResponse{Bundle: &bundle}) - if err != nil { - return status.Errorf(codes.Internal, "error sending bundle over stream: %v", err) - } - } - - case err := <-pendingTxEvent.Err(): - return 
status.Errorf(codes.Internal, "error waiting for pending transactions: %v", err) - case <-stream.Context().Done(): - log.Debug("GetBundleStream stream closed by client with error", "err", stream.Context().Err()) - return stream.Context().Err() - } - } -} - -func (s *ExecutionServiceServerV1) ExecuteOptimisticBlockStream(stream optimisticGrpc.OptimisticExecutionService_ExecuteOptimisticBlockStreamServer) error { - mempoolClearingEventCh := make(chan core.NewMempoolCleared) - mempoolClearingEvent := s.eth.TxPool().SubscribeMempoolClearance(mempoolClearingEventCh) - defer mempoolClearingEvent.Unsubscribe() - - for { - msg, err := stream.Recv() - // stream has been closed - if errors.Is(err, io.EOF) { - return nil - } - if err != nil { - return err - } - - baseBlock := msg.GetBaseBlock() - - // execute the optimistic block and wait for the mempool clearing event - optimisticBlock, err := s.ExecuteOptimisticBlock(stream.Context(), baseBlock) - if err != nil { - return status.Error(codes.Internal, "failed to execute optimistic block") - } - optimisticBlockHash := common.BytesToHash(optimisticBlock.Hash) - - // listen to the mempool clearing event and send the response back to the auctioneer when the mempool is cleared - select { - case event := <-mempoolClearingEventCh: - if event.NewHead.Hash() != optimisticBlockHash { - return status.Error(codes.Internal, "failed to clear mempool after optimistic block execution") - } - s.currentOptimisticSequencerBlock.Store(&baseBlock.SequencerBlockHash) - err = stream.Send(&optimsticPb.ExecuteOptimisticBlockStreamResponse{ - Block: optimisticBlock, - BaseSequencerBlockHash: baseBlock.SequencerBlockHash, - }) - case <-time.After(500 * time.Millisecond): - return status.Error(codes.DeadlineExceeded, "timed out waiting for mempool to clear after optimistic block execution") - case err := <-mempoolClearingEvent.Err(): - return status.Errorf(codes.Internal, "error waiting for mempool clearing event: %v", err) - } - } -} - -func (s *ExecutionServiceServerV1) ExecuteOptimisticBlock(ctx context.Context, req *optimsticPb.BaseBlock) (*astriaPb.Block, error) { - // we need to execute the optimistic block - log.Debug("ExecuteOptimisticBlock called", "timestamp", req.Timestamp, "sequencer_block_hash", req.SequencerBlockHash) - executeOptimisticBlockRequestCount.Inc(1) - - if err := validateStaticExecuteOptimisticBlockRequest(req); err != nil { - log.Error("ExecuteOptimisticBlock called with invalid BaseBlock", "err", err) - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("BaseBlock is invalid: %s", err.Error())) - } - - if !s.syncMethodsCalled() { - return nil, status.Error(codes.PermissionDenied, "Cannot execute block until GetGenesisInfo && GetCommitmentState methods are called") - } - - // Deliberately called after lock, to more directly measure the time spent executing - executionStart := time.Now() - defer executionOptimisticBlockTimer.UpdateSince(executionStart) - - // get the soft block - softBlock := s.bc.CurrentSafeBlock() - - s.blockExecutionLock.Lock() - nextFeeRecipient := s.nextFeeRecipient - s.blockExecutionLock.Unlock() - - // the height that this block will be at - height := s.bc.CurrentBlock().Number.Uint64() + 1 - - txsToProcess := types.Transactions{} - for _, tx := range req.Transactions { - unmarshalledTx, err := validateAndUnmarshalSequencerTx(height, tx, s.bridgeAddresses, s.bridgeAllowedAssets) - if err != nil { - log.Debug("failed to validate sequencer tx, ignoring", "tx", tx, "err", err) - continue - } - - err = 
s.eth.TxPool().ValidateTx(unmarshalledTx) - if err != nil { - log.Debug("failed to validate tx, ignoring", "tx", tx, "err", err) - continue - } - - txsToProcess = append(txsToProcess, unmarshalledTx) - } - - // Build a payload to add to the chain - payloadAttributes := &miner.BuildPayloadArgs{ - Parent: softBlock.Hash(), - Timestamp: uint64(req.GetTimestamp().GetSeconds()), - Random: common.Hash{}, - FeeRecipient: nextFeeRecipient, - OverrideTransactions: txsToProcess, - IsOptimisticExecution: true, - } - payload, err := s.eth.Miner().BuildPayload(payloadAttributes) - if err != nil { - log.Error("failed to build payload", "err", err) - return nil, status.Error(codes.InvalidArgument, "Could not build block with provided txs") - } - - block, err := engine.ExecutableDataToBlock(*payload.Resolve().ExecutionPayload, nil, nil) - if err != nil { - log.Error("failed to convert executable data to block", err) - return nil, status.Error(codes.Internal, "failed to execute block") - } - - // this will insert the optimistic block into the chain and persist it's state without - // setting it as the HEAD. - err = s.bc.InsertBlockWithoutSetHead(block) - if err != nil { - log.Error("failed to insert block to chain", "hash", block.Hash(), "prevHash", block.ParentHash(), "err", err) - return nil, status.Error(codes.Internal, "failed to insert block to chain") - } - - // we store a pointer to the optimistic block in the chain so that we can use it - // to retrieve the state of the optimistic block - s.bc.SetOptimistic(block) - - res := &astriaPb.Block{ - Number: uint32(block.NumberU64()), - Hash: block.Hash().Bytes(), - ParentBlockHash: block.ParentHash().Bytes(), - Timestamp: ×tamppb.Timestamp{ - Seconds: int64(block.Time()), - }, - } - - log.Info("ExecuteOptimisticBlock completed", "block_num", res.Number, "timestamp", res.Timestamp) - executeOptimisticBlockSuccessCount.Inc(1) - - return res, nil -} - // ExecuteBlock drives deterministic derivation of a rollup block from sequencer // block data func (s *ExecutionServiceServerV1) ExecuteBlock(ctx context.Context, req *astriaPb.ExecuteBlockRequest) (*astriaPb.Block, error) { @@ -439,29 +150,29 @@ func (s *ExecutionServiceServerV1) ExecuteBlock(ctx context.Context, req *astria log.Debug("ExecuteBlock called", "prevBlockHash", common.BytesToHash(req.PrevBlockHash), "tx_count", len(req.Transactions), "timestamp", req.Timestamp) executeBlockRequestCount.Inc(1) - s.blockExecutionLock.Lock() - defer s.blockExecutionLock.Unlock() + s.BlockExecutionLock().Lock() + defer s.BlockExecutionLock().Unlock() // Deliberately called after lock, to more directly measure the time spent executing executionStart := time.Now() defer executeBlockTimer.UpdateSince(executionStart) - if !s.syncMethodsCalled() { + if !s.SyncMethodsCalled() { return nil, status.Error(codes.PermissionDenied, "Cannot execute block until GetGenesisInfo && GetCommitmentState methods are called") } // Validate block being created has valid previous hash prevHeadHash := common.BytesToHash(req.PrevBlockHash) - softHash := s.bc.CurrentSafeBlock().Hash() + softHash := s.Bc().CurrentSafeBlock().Hash() if prevHeadHash != softHash { return nil, status.Error(codes.FailedPrecondition, "Block can only be created on top of soft block.") } // the height that this block will be at - height := s.bc.CurrentBlock().Number.Uint64() + 1 + height := s.Bc().CurrentBlock().Number.Uint64() + 1 txsToProcess := types.Transactions{} for _, tx := range req.Transactions { - unmarshalledTx, err := 
validateAndUnmarshalSequencerTx(height, tx, s.bridgeAddresses, s.bridgeAllowedAssets) + unmarshalledTx, err := shared.ValidateAndUnmarshalSequencerTx(height, tx, s.BridgeAddresses(), s.BridgeAllowedAssets()) if err != nil { log.Debug("failed to validate sequencer tx, ignoring", "tx", tx, "err", err) continue @@ -471,18 +182,18 @@ func (s *ExecutionServiceServerV1) ExecuteBlock(ctx context.Context, req *astria // This set of ordered TXs on the TxPool is has been configured to be used by // the Miner when building a payload. - s.eth.TxPool().SetAstriaOrdered(txsToProcess) + s.Eth().TxPool().SetAstriaOrdered(txsToProcess) // Build a payload to add to the chain payloadAttributes := &miner.BuildPayloadArgs{ Parent: prevHeadHash, Timestamp: uint64(req.GetTimestamp().GetSeconds()), Random: common.Hash{}, - FeeRecipient: s.nextFeeRecipient, + FeeRecipient: s.NextFeeRecipient(), OverrideTransactions: types.Transactions{}, IsOptimisticExecution: false, } - payload, err := s.eth.Miner().BuildPayload(payloadAttributes) + payload, err := s.Eth().Miner().BuildPayload(payloadAttributes) if err != nil { log.Error("failed to build payload", "err", err) return nil, status.Error(codes.InvalidArgument, "Could not build block with provided txs") @@ -495,14 +206,14 @@ func (s *ExecutionServiceServerV1) ExecuteBlock(ctx context.Context, req *astria log.Error("failed to convert executable data to block", err) return nil, status.Error(codes.Internal, "failed to execute block") } - err = s.bc.InsertBlockWithoutSetHead(block) + err = s.Bc().InsertBlockWithoutSetHead(block) if err != nil { log.Error("failed to insert block to chain", "hash", block.Hash(), "prevHash", req.PrevBlockHash, "err", err) return nil, status.Error(codes.Internal, "failed to insert block to chain") } // remove txs from original mempool - s.eth.TxPool().ClearAstriaOrdered() + s.Eth().TxPool().ClearAstriaOrdered() res := &astriaPb.Block{ Number: uint32(block.NumberU64()), @@ -513,8 +224,8 @@ func (s *ExecutionServiceServerV1) ExecuteBlock(ctx context.Context, req *astria }, } - if next, ok := s.bc.Config().AstriaFeeCollectors[res.Number+1]; ok { - s.nextFeeRecipient = next + if next, ok := s.Bc().Config().AstriaFeeCollectors[res.Number+1]; ok { + s.SetNextFeeRecipient(next) } log.Info("ExecuteBlock completed", "block_num", res.Number, "timestamp", res.Timestamp) @@ -528,18 +239,18 @@ func (s *ExecutionServiceServerV1) GetCommitmentState(ctx context.Context, req * log.Info("GetCommitmentState called") getCommitmentStateRequestCount.Inc(1) - softBlock, err := ethHeaderToExecutionBlock(s.bc.CurrentSafeBlock()) + softBlock, err := ethHeaderToExecutionBlock(s.Bc().CurrentSafeBlock()) if err != nil { log.Error("error finding safe block", err) return nil, status.Error(codes.Internal, "could not locate soft block") } - firmBlock, err := ethHeaderToExecutionBlock(s.bc.CurrentFinalBlock()) + firmBlock, err := ethHeaderToExecutionBlock(s.Bc().CurrentFinalBlock()) if err != nil { log.Error("error finding final block", err) return nil, status.Error(codes.Internal, "could not locate firm block") } - celestiaBlock := s.bc.CurrentBaseCelestiaHeight() + celestiaBlock := s.Bc().CurrentBaseCelestiaHeight() res := &astriaPb.CommitmentState{ Soft: softBlock, @@ -549,7 +260,7 @@ func (s *ExecutionServiceServerV1) GetCommitmentState(ctx context.Context, req * log.Info("GetCommitmentState completed", "soft_height", res.Soft.Number, "firm_height", res.Firm.Number, "base_celestia_height", res.BaseCelestiaHeight) getCommitmentStateSuccessCount.Inc(1) - 
s.getCommitmentStateCalled = true + s.SetGetCommitmentStateCalled(true) return res, nil } @@ -566,15 +277,15 @@ func (s *ExecutionServiceServerV1) UpdateCommitmentState(ctx context.Context, re commitmentUpdateStart := time.Now() defer commitmentStateUpdateTimer.UpdateSince(commitmentUpdateStart) - s.commitmentUpdateLock.Lock() - defer s.commitmentUpdateLock.Unlock() + s.CommitmentUpdateLock().Lock() + defer s.CommitmentUpdateLock().Unlock() - if !s.syncMethodsCalled() { + if !s.SyncMethodsCalled() { return nil, status.Error(codes.PermissionDenied, "Cannot update commitment state until GetGenesisInfo && GetCommitmentState methods are called") } - if s.bc.CurrentBaseCelestiaHeight() > req.CommitmentState.BaseCelestiaHeight { - errStr := fmt.Sprintf("Base Celestia height cannot be decreased, current_base_celestia_height: %d, new_base_celestia_height: %d", s.bc.CurrentBaseCelestiaHeight(), req.CommitmentState.BaseCelestiaHeight) + if s.Bc().CurrentBaseCelestiaHeight() > req.CommitmentState.BaseCelestiaHeight { + errStr := fmt.Sprintf("Base Celestia height cannot be decreased, current_base_celestia_height: %d, new_base_celestia_height: %d", s.Bc().CurrentBaseCelestiaHeight(), req.CommitmentState.BaseCelestiaHeight) return nil, status.Error(codes.InvalidArgument, errStr) } @@ -582,50 +293,50 @@ func (s *ExecutionServiceServerV1) UpdateCommitmentState(ctx context.Context, re firmEthHash := common.BytesToHash(req.CommitmentState.Firm.Hash) // Validate that the firm and soft blocks exist before going further - softBlock := s.bc.GetBlockByHash(softEthHash) + softBlock := s.Bc().GetBlockByHash(softEthHash) if softBlock == nil { return nil, status.Error(codes.InvalidArgument, "Soft block specified does not exist") } - firmBlock := s.bc.GetBlockByHash(firmEthHash) + firmBlock := s.Bc().GetBlockByHash(firmEthHash) if firmBlock == nil { return nil, status.Error(codes.InvalidArgument, "Firm block specified does not exist") } - currentHead := s.bc.CurrentBlock().Hash() + currentHead := s.Bc().CurrentBlock().Hash() // Update the canonical chain to soft block. 
We must do this before last // validation step since there is no way to check if firm block descends from // anything but the canonical chain if currentHead != softEthHash { - if _, err := s.bc.SetCanonical(softBlock); err != nil { + if _, err := s.Bc().SetCanonical(softBlock); err != nil { log.Error("failed updating canonical chain to soft block", err) return nil, status.Error(codes.Internal, "Could not update head to safe hash") } } // Once head is updated validate that firm belongs to chain - rollbackBlock := s.bc.GetBlockByHash(currentHead) - if s.bc.GetCanonicalHash(firmBlock.NumberU64()) != firmEthHash { + rollbackBlock := s.Bc().GetBlockByHash(currentHead) + if s.Bc().GetCanonicalHash(firmBlock.NumberU64()) != firmEthHash { log.Error("firm block not found in canonical chain defined by soft block, rolling back") - if _, err := s.bc.SetCanonical(rollbackBlock); err != nil { + if _, err := s.Bc().SetCanonical(rollbackBlock); err != nil { panic("rollback to previous head after failed validation failed") } return nil, status.Error(codes.InvalidArgument, "soft block in request is not a descendant of the current firmly committed block") } - s.eth.SetSynced() + s.Eth().SetSynced() // Updating the safe and final after everything validated - currentSafe := s.bc.CurrentSafeBlock().Hash() + currentSafe := s.Bc().CurrentSafeBlock().Hash() if currentSafe != softEthHash { - s.bc.SetSafe(softBlock.Header()) + s.Bc().SetSafe(softBlock.Header()) } - currentFirm := s.bc.CurrentFinalBlock().Hash() + currentFirm := s.Bc().CurrentFinalBlock().Hash() if currentFirm != firmEthHash { - s.bc.SetCelestiaFinalized(firmBlock.Header(), req.CommitmentState.BaseCelestiaHeight) + s.Bc().SetCelestiaFinalized(firmBlock.Header(), req.CommitmentState.BaseCelestiaHeight) } log.Info("UpdateCommitmentState completed", "soft_height", softBlock.NumberU64(), "firm_height", firmBlock.NumberU64()) @@ -641,9 +352,9 @@ func (s *ExecutionServiceServerV1) getBlockFromIdentifier(identifier *astriaPb.B // Grab the header based on the identifier provided switch idType := identifier.Identifier.(type) { case *astriaPb.BlockIdentifier_BlockNumber: - header = s.bc.GetHeaderByNumber(uint64(identifier.GetBlockNumber())) + header = s.Bc().GetHeaderByNumber(uint64(identifier.GetBlockNumber())) case *astriaPb.BlockIdentifier_BlockHash: - header = s.bc.GetHeaderByHash(common.BytesToHash(identifier.GetBlockHash())) + header = s.Bc().GetHeaderByHash(common.BytesToHash(identifier.GetBlockHash())) default: return nil, status.Errorf(codes.InvalidArgument, "identifier has unexpected type %T", idType) } @@ -676,6 +387,58 @@ func ethHeaderToExecutionBlock(header *types.Header) (*astriaPb.Block, error) { }, nil } -func (s *ExecutionServiceServerV1) syncMethodsCalled() bool { - return s.genesisInfoCalled && s.getCommitmentStateCalled +func (s *ExecutionServiceServerV1) Eth() *eth.Ethereum { + return s.sharedServiceContainer.Eth() +} + +func (s *ExecutionServiceServerV1) Bc() *core.BlockChain { + return s.sharedServiceContainer.Bc() +} + +func (s *ExecutionServiceServerV1) SetGenesisInfoCalled(value bool) { + s.sharedServiceContainer.SetGenesisInfoCalled(value) +} + +func (s *ExecutionServiceServerV1) GenesisInfoCalled() bool { + return s.sharedServiceContainer.GenesisInfoCalled() +} + +func (s *ExecutionServiceServerV1) SetGetCommitmentStateCalled(value bool) { + s.sharedServiceContainer.SetGetCommitmentStateCalled(value) +} + +func (s *ExecutionServiceServerV1) CommitmentStateCalled() bool { + return s.sharedServiceContainer.CommitmentStateCalled() +} 
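+
+// The accessors below delegate to the shared service container so that this
+// execution service and the optimistic service (grpc/optimistic) observe the
+// same sync flags, locks, fee recipient and bridge configuration.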
+ +func (s *ExecutionServiceServerV1) CommitmentUpdateLock() *sync.Mutex { + return s.sharedServiceContainer.CommitmentUpdateLock() +} + +func (s *ExecutionServiceServerV1) BlockExecutionLock() *sync.Mutex { + return s.sharedServiceContainer.BlockExecutionLock() +} + +func (s *ExecutionServiceServerV1) NextFeeRecipient() common.Address { + return s.sharedServiceContainer.NextFeeRecipient() +} + +func (s *ExecutionServiceServerV1) SetNextFeeRecipient(feeRecipient common.Address) { + s.sharedServiceContainer.SetNextFeeRecipient(feeRecipient) +} + +func (s *ExecutionServiceServerV1) BridgeAddresses() map[string]*params.AstriaBridgeAddressConfig { + return s.sharedServiceContainer.BridgeAddresses() +} + +func (s *ExecutionServiceServerV1) BridgeAllowedAssets() map[string]struct{} { + return s.sharedServiceContainer.BridgeAllowedAssets() +} + +func (s *ExecutionServiceServerV1) BridgeSenderAddress() common.Address { + return s.sharedServiceContainer.BridgeSenderAddress() +} + +func (s *ExecutionServiceServerV1) SyncMethodsCalled() bool { + return s.sharedServiceContainer.SyncMethodsCalled() } diff --git a/grpc/execution/server_test.go b/grpc/execution/server_test.go index 79d2f5889..e388fe321 100644 --- a/grpc/execution/server_test.go +++ b/grpc/execution/server_test.go @@ -1,7 +1,6 @@ package execution import ( - optimsticPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/bundle/v1alpha1" astriaPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/execution/v1" primitivev1 "buf.build/gen/go/astria/primitives/protocolbuffers/go/astria/primitive/v1" sequencerblockv1 "buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go/astria/sequencerblock/v1" @@ -9,9 +8,9 @@ import ( "context" "crypto/sha256" "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/grpc/shared" "github.com/ethereum/go-ethereum/params" "github.com/holiman/uint256" "github.com/stretchr/testify/require" @@ -20,11 +19,11 @@ import ( "google.golang.org/protobuf/types/known/timestamppb" "math/big" "testing" - "time" ) func TestExecutionService_GetGenesisInfo(t *testing.T) { - ethservice, serviceV1Alpha1 := setupExecutionService(t, 10) + ethservice, sharedServiceContainer := shared.SetupSharedService(t, 10) + serviceV1Alpha1 := SetupExecutionService(t, sharedServiceContainer) genesisInfo, err := serviceV1Alpha1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{}) require.Nil(t, err, "GetGenesisInfo failed") @@ -34,11 +33,12 @@ func TestExecutionService_GetGenesisInfo(t *testing.T) { require.True(t, bytes.Equal(genesisInfo.RollupId.Inner, hashedRollupId[:]), "RollupId is not correct") require.Equal(t, genesisInfo.GetSequencerGenesisBlockHeight(), ethservice.BlockChain().Config().AstriaSequencerInitialHeight, "SequencerInitialHeight is not correct") require.Equal(t, genesisInfo.GetCelestiaBlockVariance(), ethservice.BlockChain().Config().AstriaCelestiaHeightVariance, "CelestiaHeightVariance is not correct") - require.True(t, serviceV1Alpha1.genesisInfoCalled, "GetGenesisInfo should be called") + require.True(t, serviceV1Alpha1.sharedServiceContainer.GenesisInfoCalled(), "GetGenesisInfo should be called") } func TestExecutionServiceServerV1Alpha2_GetCommitmentState(t *testing.T) { - ethservice, serviceV1Alpha1 := setupExecutionService(t, 10) + ethservice, sharedServiceContainer := shared.SetupSharedService(t, 10) + serviceV1Alpha1 
:= SetupExecutionService(t, sharedServiceContainer) commitmentState, err := serviceV1Alpha1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) require.Nil(t, err, "GetCommitmentState failed") @@ -60,11 +60,12 @@ func TestExecutionServiceServerV1Alpha2_GetCommitmentState(t *testing.T) { require.Equal(t, uint64(commitmentState.Firm.Number), firmBlock.Number.Uint64(), "Firm Block Number do not match") require.Equal(t, commitmentState.BaseCelestiaHeight, ethservice.BlockChain().Config().AstriaCelestiaInitialHeight, "BaseCelestiaHeight is not correct") - require.True(t, serviceV1Alpha1.getCommitmentStateCalled, "GetCommitmentState should be called") + require.True(t, serviceV1Alpha1.sharedServiceContainer.CommitmentStateCalled(), "GetCommitmentState should be called") } func TestExecutionService_GetBlock(t *testing.T) { - ethservice, serviceV1Alpha1 := setupExecutionService(t, 10) + ethservice, sharedServiceContainer := shared.SetupSharedService(t, 10) + serviceV1Alpha1 := SetupExecutionService(t, sharedServiceContainer) tests := []struct { description string @@ -123,7 +124,8 @@ func TestExecutionService_GetBlock(t *testing.T) { } func TestExecutionServiceServerV1Alpha2_BatchGetBlocks(t *testing.T) { - ethservice, serviceV1Alpha1 := setupExecutionService(t, 10) + ethservice, sharedServiceContainer := shared.SetupSharedService(t, 10) + serviceV1Alpha1 := SetupExecutionService(t, sharedServiceContainer) tests := []struct { description string @@ -200,7 +202,7 @@ func bigIntToProtoU128(i *big.Int) *primitivev1.Uint128 { } func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) { - ethservice, _ := setupExecutionService(t, 10) + ethservice, _ := shared.SetupSharedService(t, 10) tests := []struct { description string @@ -252,7 +254,8 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) { for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { // reset the blockchain with each test - ethservice, serviceV1Alpha1 := setupExecutionService(t, 10) + ethservice, sharedServiceContainer := shared.SetupSharedService(t, 10) + serviceV1Alpha1 := SetupExecutionService(t, sharedServiceContainer) var err error // adding this to prevent shadowing of genesisInfo in the below if branch var genesisInfo *astriaPb.GenesisInfo @@ -273,8 +276,8 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) { txs := []*types.Transaction{} marshalledTxs := []*sequencerblockv1.RollupData{} for i := 0; i < 5; i++ { - unsignedTx := types.NewTransaction(uint64(i), testToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil) - tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), testKey) + unsignedTx := types.NewTransaction(uint64(i), shared.TestToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil) + tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), shared.TestKey) require.Nil(t, err, "Failed to sign tx") txs = append(txs, tx) @@ -345,7 +348,8 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) { } func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitment(t *testing.T) { - ethservice, serviceV1Alpha1 := setupExecutionService(t, 10) + ethservice, sharedServiceContainer := shared.SetupSharedService(t, 10) + serviceV1Alpha1 := SetupExecutionService(t, sharedServiceContainer) // call genesis info genesisInfo, err := serviceV1Alpha1.GetGenesisInfo(context.Background(), 
&astriaPb.GetGenesisInfoRequest{}) @@ -365,8 +369,8 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitment(t *testi txs := []*types.Transaction{} marshalledTxs := []*sequencerblockv1.RollupData{} for i := 0; i < 5; i++ { - unsignedTx := types.NewTransaction(uint64(i), testToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil) - tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), testKey) + unsignedTx := types.NewTransaction(uint64(i), shared.TestToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil) + tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), shared.TestKey) require.Nil(t, err, "Failed to sign tx") txs = append(txs, tx) @@ -479,410 +483,10 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitment(t *testi require.True(t, balanceDiff.Cmp(uint256.NewInt(1000000000000000000)) == 0, "Chain destination address balance is not correct") } -func TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlock(t *testing.T) { - ethservice, _ := setupExecutionService(t, 10) - - tests := []struct { - description string - callGenesisInfoAndGetCommitmentState bool - numberOfTxs int - prevBlockHash []byte - timestamp uint64 - depositTxAmount *big.Int // if this is non zero then we send a deposit tx - expectedReturnCode codes.Code - }{ - { - description: "ExecuteOptimisticBlock without calling GetGenesisInfo and GetCommitmentState", - callGenesisInfoAndGetCommitmentState: false, - numberOfTxs: 5, - prevBlockHash: ethservice.BlockChain().GetBlockByNumber(2).Hash().Bytes(), - timestamp: ethservice.BlockChain().GetBlockByNumber(2).Time() + 2, - depositTxAmount: big.NewInt(0), - expectedReturnCode: codes.PermissionDenied, - }, - { - description: "ExecuteOptimisticBlock with 5 txs and no deposit tx", - callGenesisInfoAndGetCommitmentState: true, - numberOfTxs: 5, - prevBlockHash: ethservice.BlockChain().CurrentSafeBlock().Hash().Bytes(), - timestamp: ethservice.BlockChain().CurrentSafeBlock().Time + 2, - depositTxAmount: big.NewInt(0), - expectedReturnCode: 0, - }, - { - description: "ExecuteOptimisticBlock with 5 txs and a deposit tx", - callGenesisInfoAndGetCommitmentState: true, - numberOfTxs: 5, - prevBlockHash: ethservice.BlockChain().CurrentSafeBlock().Hash().Bytes(), - timestamp: ethservice.BlockChain().CurrentSafeBlock().Time + 2, - depositTxAmount: big.NewInt(1000000000000000000), - expectedReturnCode: 0, - }, - } - - for _, tt := range tests { - t.Run(tt.description, func(t *testing.T) { - // reset the blockchain with each test - ethservice, serviceV1Alpha1 := setupExecutionService(t, 10) - - var err error // adding this to prevent shadowing of genesisInfo in the below if branch - var genesisInfo *astriaPb.GenesisInfo - var commitmentStateBeforeExecuteBlock *astriaPb.CommitmentState - if tt.callGenesisInfoAndGetCommitmentState { - // call getGenesisInfo and getCommitmentState before calling executeBlock - genesisInfo, err = serviceV1Alpha1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{}) - require.Nil(t, err, "GetGenesisInfo failed") - require.NotNil(t, genesisInfo, "GenesisInfo is nil") - - commitmentStateBeforeExecuteBlock, err = serviceV1Alpha1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) - require.Nil(t, err, "GetCommitmentState failed") - require.NotNil(t, commitmentStateBeforeExecuteBlock, "CommitmentState is nil") - } - - // create the txs to send - // create 5 txs - 
txs := []*types.Transaction{} - marshalledTxs := []*sequencerblockv1.RollupData{} - for i := 0; i < 5; i++ { - unsignedTx := types.NewTransaction(uint64(i), testToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil) - tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), testKey) - require.Nil(t, err, "Failed to sign tx") - txs = append(txs, tx) - - marshalledTx, err := tx.MarshalBinary() - require.Nil(t, err, "Failed to marshal tx") - marshalledTxs = append(marshalledTxs, &sequencerblockv1.RollupData{ - Value: &sequencerblockv1.RollupData_SequencedData{SequencedData: marshalledTx}, - }) - } - - // create deposit tx if depositTxAmount is non zero - if tt.depositTxAmount.Cmp(big.NewInt(0)) != 0 { - depositAmount := bigIntToProtoU128(tt.depositTxAmount) - bridgeAddress := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].BridgeAddress - bridgeAssetDenom := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].AssetDenom - - // create new chain destination address for better testing - chainDestinationAddressPrivKey, err := crypto.GenerateKey() - require.Nil(t, err, "Failed to generate chain destination address") - - chainDestinationAddress := crypto.PubkeyToAddress(chainDestinationAddressPrivKey.PublicKey) - - depositTx := &sequencerblockv1.RollupData{Value: &sequencerblockv1.RollupData_Deposit{Deposit: &sequencerblockv1.Deposit{ - BridgeAddress: &primitivev1.Address{ - Bech32M: bridgeAddress, - }, - Asset: bridgeAssetDenom, - Amount: depositAmount, - RollupId: genesisInfo.RollupId, - DestinationChainAddress: chainDestinationAddress.String(), - SourceTransactionId: &primitivev1.TransactionId{ - Inner: "test_tx_hash", - }, - SourceActionIndex: 0, - }}} - - marshalledTxs = append(marshalledTxs, depositTx) - } - - optimisticHeadCh := make(chan core.ChainOptimisticHeadEvent, 1) - optimsticHeadSub := ethservice.BlockChain().SubscribeChainOptimisticHeadEvent(optimisticHeadCh) - defer optimsticHeadSub.Unsubscribe() - - baseBlockReq := &optimsticPb.BaseBlock{ - Timestamp: ×tamppb.Timestamp{ - Seconds: int64(tt.timestamp), - }, - Transactions: marshalledTxs, - SequencerBlockHash: []byte("test_hash"), - } - - res, err := serviceV1Alpha1.ExecuteOptimisticBlock(context.Background(), baseBlockReq) - if tt.expectedReturnCode > 0 { - require.NotNil(t, err, "ExecuteOptimisticBlock should return an error") - require.Equal(t, tt.expectedReturnCode, status.Code(err), "ExecuteOptimisticBlock failed") - } else { - require.Nil(t, err, "ExecuteOptimisticBlock failed") - } - if err == nil { - require.NotNil(t, res, "ExecuteOptimisticBlock response is nil") - - astriaOrdered := ethservice.TxPool().AstriaOrdered() - require.Equal(t, 0, astriaOrdered.Len(), "AstriaOrdered should be empty") - - // check if commitment state is not updated - commitmentStateAfterExecuteBlock, err := serviceV1Alpha1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) - require.Nil(t, err, "GetCommitmentState failed") - - require.Exactly(t, commitmentStateBeforeExecuteBlock, commitmentStateAfterExecuteBlock, "Commitment state should not be updated") - - // check if the optimistic block is set - optimisticBlock := ethservice.BlockChain().CurrentOptimisticBlock() - require.NotNil(t, optimisticBlock, "Optimistic block is not set") - - // check if the optimistic block is correct - require.Equal(t, common.BytesToHash(res.Hash), optimisticBlock.Hash(), "Optimistic block hashes do not match") - require.Equal(t, 
common.BytesToHash(res.ParentBlockHash), optimisticBlock.ParentHash, "Optimistic block parent hashes do not match") - require.Equal(t, uint64(res.Number), optimisticBlock.Number.Uint64(), "Optimistic block numbers do not match") - - // check if optimistic block is inserted into chain - block := ethservice.BlockChain().GetBlockByHash(optimisticBlock.Hash()) - require.NotNil(t, block, "Optimistic block not found in blockchain") - require.Equal(t, uint64(res.Number), block.NumberU64(), "Block number is not correct") - - // timeout for optimistic head event - select { - case blockEvent := <-optimisticHeadCh: - require.NotNil(t, blockEvent, "Optimistic head event not received") - require.Equal(t, block.Hash(), blockEvent.Block.Hash(), "Optimistic head event block hash is not correct") - require.Equal(t, block.NumberU64(), blockEvent.Block.NumberU64(), "Optimistic head event block number is not correct") - case <-time.After(2 * time.Second): - require.FailNow(t, "Optimistic head event not received") - case err := <-optimsticHeadSub.Err(): - require.Nil(t, err, "Optimistic head event subscription failed") - } - } - }) - } -} - -func TestNewExecutionServiceServerV1Alpha2_StreamBundles(t *testing.T) { - ethservice, serviceV1Alpha1 := setupExecutionService(t, 10) - - // call genesis info - genesisInfo, err := serviceV1Alpha1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{}) - require.Nil(t, err, "GetGenesisInfo failed") - require.NotNil(t, genesisInfo, "GenesisInfo is nil") - - // call get commitment state - commitmentState, err := serviceV1Alpha1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) - require.Nil(t, err, "GetCommitmentState failed") - require.NotNil(t, commitmentState, "CommitmentState is nil") - - // get previous block hash - previousBlock := ethservice.BlockChain().CurrentSafeBlock() - require.NotNil(t, previousBlock, "Previous block not found") - - // create the optimistic block via the StreamExecuteOptimisticBlock rpc - requestStreams := []*optimsticPb.ExecuteOptimisticBlockStreamRequest{} - sequencerBlockHash := []byte("sequencer_block_hash") - - // create 1 stream item with 5 txs - txs := []*types.Transaction{} - marshalledTxs := []*sequencerblockv1.RollupData{} - for i := 0; i < 5; i++ { - unsignedTx := types.NewTransaction(uint64(i), testToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil) - tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), testKey) - require.Nil(t, err, "Failed to sign tx") - txs = append(txs, tx) - - marshalledTx, err := tx.MarshalBinary() - require.Nil(t, err, "Failed to marshal tx") - marshalledTxs = append(marshalledTxs, &sequencerblockv1.RollupData{ - Value: &sequencerblockv1.RollupData_SequencedData{SequencedData: marshalledTx}, - }) - } - - req := optimsticPb.ExecuteOptimisticBlockStreamRequest{BaseBlock: &optimsticPb.BaseBlock{ - SequencerBlockHash: sequencerBlockHash, - Transactions: marshalledTxs, - Timestamp: ×tamppb.Timestamp{ - Seconds: int64(previousBlock.Time + 2), - }, - }} - - requestStreams = append(requestStreams, &req) - - mockBidirectionalStream := &MockBidirectionalStreaming[optimsticPb.ExecuteOptimisticBlockStreamRequest, optimsticPb.ExecuteOptimisticBlockStreamResponse]{ - requestStream: requestStreams, - accumulatedResponses: []*optimsticPb.ExecuteOptimisticBlockStreamResponse{}, - requestCounter: 0, - } - - errorCh := make(chan error) - go func(errorCh chan error) { - errorCh <- 
serviceV1Alpha1.ExecuteOptimisticBlockStream(mockBidirectionalStream) - }(errorCh) - - select { - // stream either errors out of gets closed - case err := <-errorCh: - require.Nil(t, err, "StreamExecuteOptimisticBlock failed") - } - - require.Len(t, mockBidirectionalStream.accumulatedResponses, 1, "Number of responses should match the number of requests") - accumulatedResponse := mockBidirectionalStream.accumulatedResponses[0] - - currentOptimisticBlock := ethservice.BlockChain().CurrentOptimisticBlock() - require.NotNil(t, currentOptimisticBlock, "Optimistic block is not set") - require.True(t, bytes.Equal(accumulatedResponse.GetBlock().Hash, currentOptimisticBlock.Hash().Bytes()), "Optimistic block hashes do not match") - require.True(t, bytes.Equal(accumulatedResponse.GetBlock().ParentBlockHash, currentOptimisticBlock.ParentHash.Bytes()), "Optimistic block parent hashes do not match") - require.Equal(t, uint64(accumulatedResponse.GetBlock().Number), currentOptimisticBlock.Number.Uint64(), "Optimistic block numbers do not match") - - // assert mempool is cleared - astriaOrdered := ethservice.TxPool().AstriaOrdered() - require.Equal(t, 0, astriaOrdered.Len(), "AstriaOrdered should be empty") - - pending, queued := ethservice.TxPool().Stats() - require.Equal(t, pending, 0, "Mempool should have 0 pending txs") - require.Equal(t, queued, 0, "Mempool should have 0 queued txs") - - mockServerSideStreaming := MockServerSideStreaming[optimsticPb.GetBundleStreamResponse]{ - sentResponses: []*optimsticPb.GetBundleStreamResponse{}, - } - - errorCh = make(chan error) - go func() { - errorCh <- serviceV1Alpha1.GetBundleStream(&mockServerSideStreaming) - }() - - // optimistic block is created, we can now add txs and check if they get streamed - // create 5 txs - txs = []*types.Transaction{} - for i := 5; i < 10; i++ { - unsignedTx := types.NewTransaction(uint64(i), testToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil) - tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), testKey) - require.Nil(t, err, "Failed to sign tx") - txs = append(txs, tx) - - marshalledTx, err := tx.MarshalBinary() - require.Nil(t, err, "Failed to marshal tx") - marshalledTxs = append(marshalledTxs, &sequencerblockv1.RollupData{ - Value: &sequencerblockv1.RollupData_SequencedData{SequencedData: marshalledTx}, - }) - } - - txErrors := ethservice.TxPool().Add(txs, true, false) - for _, txErr := range txErrors { - require.Nil(t, txErr, "Failed to add tx to mempool") - } - - pending, queued = ethservice.TxPool().Stats() - require.Equal(t, pending, 5, "Mempool should have 5 pending txs") - require.Equal(t, queued, 0, "Mempool should have 0 queued txs") - - // give some time for the txs to stream - time.Sleep(5 * time.Second) - - // close the mempool to error the method out - err = ethservice.TxPool().Close() - require.Nil(t, err, "Failed to close mempool") - - select { - case err := <-errorCh: - require.ErrorContains(t, err, "error waiting for pending transactions") - } - - require.Len(t, mockServerSideStreaming.sentResponses, 5, "Number of responses should match the number of requests") - - for _, resp := range mockServerSideStreaming.sentResponses { - bundle := resp.GetBundle() - require.Len(t, bundle.Transactions, 1, "Bundle should have 1 tx") - require.True(t, bytes.Equal(bundle.PrevRollupBlockHash, currentOptimisticBlock.Hash().Bytes()), "PrevRollupBlockHash should match the current optimistic block hash") - require.True(t, 
bytes.Equal(bundle.BaseSequencerBlockHash, *serviceV1Alpha1.currentOptimisticSequencerBlock.Load()), "BaseSequencerBlockHash should match the current optimistic sequencer block hash") - } -} - -func TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlockStream(t *testing.T) { - ethservice, serviceV1Alpha1 := setupExecutionService(t, 10) - - // call genesis info - genesisInfo, err := serviceV1Alpha1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{}) - require.Nil(t, err, "GetGenesisInfo failed") - require.NotNil(t, genesisInfo, "GenesisInfo is nil") - - // call get commitment state - commitmentState, err := serviceV1Alpha1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) - require.Nil(t, err, "GetCommitmentState failed") - require.NotNil(t, commitmentState, "CommitmentState is nil") - - // get previous block hash - previousBlock := ethservice.BlockChain().CurrentSafeBlock() - require.NotNil(t, previousBlock, "Previous block not found") - - requestStreams := []*optimsticPb.ExecuteOptimisticBlockStreamRequest{} - sequencerBlockHash := []byte("sequencer_block_hash") - - // create 1 stream item with 5 txs - txs := []*types.Transaction{} - marshalledTxs := []*sequencerblockv1.RollupData{} - for i := 0; i < 5; i++ { - unsignedTx := types.NewTransaction(uint64(i), testToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil) - tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), testKey) - require.Nil(t, err, "Failed to sign tx") - txs = append(txs, tx) - - marshalledTx, err := tx.MarshalBinary() - require.Nil(t, err, "Failed to marshal tx") - marshalledTxs = append(marshalledTxs, &sequencerblockv1.RollupData{ - Value: &sequencerblockv1.RollupData_SequencedData{SequencedData: marshalledTx}, - }) - } - - errs := ethservice.TxPool().Add(txs, true, false) - for _, err := range errs { - require.Nil(t, err, "Failed to add tx to mempool") - } - - pending, queued := ethservice.TxPool().Stats() - require.Equal(t, pending, 5, "Mempool should have 5 pending txs") - require.Equal(t, queued, 0, "Mempool should have 0 queued txs") - - req := optimsticPb.ExecuteOptimisticBlockStreamRequest{BaseBlock: &optimsticPb.BaseBlock{ - SequencerBlockHash: sequencerBlockHash, - Transactions: marshalledTxs, - Timestamp: ×tamppb.Timestamp{ - Seconds: int64(previousBlock.Time + 2), - }, - }} - - requestStreams = append(requestStreams, &req) - - mockStream := &MockBidirectionalStreaming[optimsticPb.ExecuteOptimisticBlockStreamRequest, optimsticPb.ExecuteOptimisticBlockStreamResponse]{ - requestStream: requestStreams, - accumulatedResponses: []*optimsticPb.ExecuteOptimisticBlockStreamResponse{}, - requestCounter: 0, - } - - errorCh := make(chan error) - go func(errorCh chan error) { - errorCh <- serviceV1Alpha1.ExecuteOptimisticBlockStream(mockStream) - }(errorCh) - - select { - // the stream will either errors out or gets closed - case err := <-errorCh: - require.Nil(t, err, "StreamExecuteOptimisticBlock failed") - } - - accumulatedResponses := mockStream.accumulatedResponses - - require.Equal(t, len(accumulatedResponses), len(mockStream.requestStream), "Number of responses should match the number of requests") - - require.True(t, bytes.Equal(*serviceV1Alpha1.currentOptimisticSequencerBlock.Load(), sequencerBlockHash), "Optimistic sequencer block hash should be set correctly") - - blockCounter := 1 - for _, response := range accumulatedResponses { - require.True(t, 
bytes.Equal(response.GetBaseSequencerBlockHash(), sequencerBlockHash), "Sequencer block hash does not match") - block := response.GetBlock() - require.True(t, bytes.Equal(block.ParentBlockHash, previousBlock.Hash().Bytes()), "Parent block hash does not match") - requiredBlockNumber := big.NewInt(0).Add(previousBlock.Number, big.NewInt(int64(blockCounter))) - require.Equal(t, requiredBlockNumber.Uint64(), uint64(block.Number), "Block number is not correct") - blockCounter += 1 - } - - // ensure mempool is cleared - astriaOrdered := ethservice.TxPool().AstriaOrdered() - require.Equal(t, 0, astriaOrdered.Len(), "AstriaOrdered should be empty") - - pending, queued = ethservice.TxPool().Stats() - require.Equal(t, pending, 0, "Mempool should have 0 pending txs") - require.Equal(t, queued, 0, "Mempool should have 0 queued txs") -} - // Check that invalid transactions are not added into a block and are removed from the mempool func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitmentWithInvalidTransactions(t *testing.T) { - ethservice, serviceV1Alpha1 := setupExecutionService(t, 10) + ethservice, sharedServiceContainer := shared.SetupSharedService(t, 10) + serviceV1Alpha1 := SetupExecutionService(t, sharedServiceContainer) // call genesis info genesisInfo, err := serviceV1Alpha1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{}) @@ -905,14 +509,14 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitmentWithInval stateDb, err := ethservice.BlockChain().StateAt(previousBlock.Root()) require.Nil(t, err, "Failed to get state db") - latestNonce := stateDb.GetNonce(testAddr) + latestNonce := stateDb.GetNonce(shared.TestAddr) // create 5 txs txs := []*types.Transaction{} marshalledTxs := []*sequencerblockv1.RollupData{} for i := 0; i < 5; i++ { - unsignedTx := types.NewTransaction(latestNonce+uint64(i), testToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil) - tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), testKey) + unsignedTx := types.NewTransaction(latestNonce+uint64(i), shared.TestToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil) + tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), shared.TestKey) require.Nil(t, err, "Failed to sign tx") txs = append(txs, tx) @@ -923,9 +527,9 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitmentWithInval }) } - // add a tx with lesser gas than the base gas - unsignedTx := types.NewTransaction(latestNonce+uint64(5), testToAddress, big.NewInt(1), ethservice.BlockChain().GasLimit(), big.NewInt(params.InitialBaseFee*2), nil) - tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), testKey) + // add a tx which takes up more gas than the block gas limit + unsignedTx := types.NewTransaction(latestNonce+uint64(5), shared.TestToAddress, big.NewInt(1), ethservice.BlockChain().GasLimit(), big.NewInt(params.InitialBaseFee*2), nil) + tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), shared.TestKey) require.Nil(t, err, "Failed to sign tx") txs = append(txs, tx) diff --git a/grpc/execution/test_setup.go b/grpc/execution/test_setup.go new file mode 100644 index 000000000..08f0413f4 --- /dev/null +++ b/grpc/execution/test_setup.go @@ -0,0 +1,11 @@ +package execution + +import ( + "github.com/ethereum/go-ethereum/grpc/shared" + "testing" +) + +func SetupExecutionService(t *testing.T, sharedService 
*shared.SharedServiceContainer) *ExecutionServiceServerV1 { + t.Helper() + return NewExecutionServiceServerV1Alpha2(sharedService) +} diff --git a/grpc/execution/validation.go b/grpc/execution/validation.go index 86dee556d..be4f675ad 100644 --- a/grpc/execution/validation.go +++ b/grpc/execution/validation.go @@ -1,113 +1,10 @@ package execution import ( - "crypto/sha256" + astriaPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/execution/v1alpha2" "fmt" - "math/big" - - optimsticPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/bundle/v1alpha1" - astriaPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/execution/v1" - sequencerblockv1 "buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go/astria/sequencerblock/v1" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/contracts" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/params" ) -// `validateAndUnmarshalSequencerTx` validates and unmarshals the given rollup sequencer transaction. -// If the sequencer transaction is a deposit tx, we ensure that the asset ID is allowed and the bridge address is known. -// If the sequencer transaction is not a deposit tx, we unmarshal the sequenced data into an Ethereum transaction. We ensure that the -// tx is not a blob tx or a deposit tx. -func validateAndUnmarshalSequencerTx( - height uint64, - tx *sequencerblockv1.RollupData, - bridgeAddresses map[string]*params.AstriaBridgeAddressConfig, - bridgeAllowedAssets map[string]struct{}, -) (*types.Transaction, error) { - if deposit := tx.GetDeposit(); deposit != nil { - bridgeAddress := deposit.BridgeAddress.GetBech32M() - bac, ok := bridgeAddresses[bridgeAddress] - if !ok { - return nil, fmt.Errorf("unknown bridge address: %s", bridgeAddress) - } - - if height < uint64(bac.StartHeight) { - return nil, fmt.Errorf("bridging asset %s from bridge %s not allowed before height %d", bac.AssetDenom, bridgeAddress, bac.StartHeight) - } - - if _, ok := bridgeAllowedAssets[deposit.Asset]; !ok { - return nil, fmt.Errorf("disallowed asset %s in deposit tx", deposit.Asset) - } - - if deposit.Asset != bac.AssetDenom { - return nil, fmt.Errorf("asset %s does not match bridge address %s asset", deposit.Asset, bridgeAddress) - } - - recipient := common.HexToAddress(deposit.DestinationChainAddress) - amount := bac.ScaledDepositAmount(protoU128ToBigInt(deposit.Amount)) - - if bac.Erc20Asset != nil { - log.Debug("creating deposit tx to mint ERC20 asset", "token", bac.AssetDenom, "erc20Address", bac.Erc20Asset.ContractAddress) - abi, err := contracts.AstriaBridgeableERC20MetaData.GetAbi() - if err != nil { - // this should never happen, as the abi is hardcoded in the contract bindings - return nil, fmt.Errorf("failed to get abi for erc20 contract for asset %s: %w", bac.AssetDenom, err) - } - - // pack arguments for calling the `mint` function on the ERC20 contract - args := []interface{}{recipient, amount} - calldata, err := abi.Pack("mint", args...) - if err != nil { - return nil, err - } - - txdata := types.DepositTx{ - From: bac.SenderAddress, - Value: new(big.Int), // don't need to set this, as we aren't minting the native asset - // mints cost ~14k gas, however this can vary based on existing storage, so we add a little extra as buffer. 
- // - // the fees are spent from the "bridge account" which is not actually a real account, but is instead some - // address defined by consensus, so the gas cost is not actually deducted from any account. - Gas: 64000, - To: &bac.Erc20Asset.ContractAddress, - Data: calldata, - SourceTransactionId: *deposit.SourceTransactionId, - SourceTransactionIndex: deposit.SourceActionIndex, - } - - tx := types.NewTx(&txdata) - return tx, nil - } - - txdata := types.DepositTx{ - From: bac.SenderAddress, - To: &recipient, - Value: amount, - Gas: 0, - SourceTransactionId: *deposit.SourceTransactionId, - SourceTransactionIndex: deposit.SourceActionIndex, - } - return types.NewTx(&txdata), nil - } else { - ethTx := new(types.Transaction) - err := ethTx.UnmarshalBinary(tx.GetSequencedData()) - if err != nil { - return nil, fmt.Errorf("failed to unmarshal sequenced data into transaction: %w. tx hash: %s", err, sha256.Sum256(tx.GetSequencedData())) - } - - if ethTx.Type() == types.DepositTxType { - return nil, fmt.Errorf("deposit tx not allowed in sequenced data. tx hash: %s", sha256.Sum256(tx.GetSequencedData())) - } - - if ethTx.Type() == types.BlobTxType { - return nil, fmt.Errorf("blob tx not allowed in sequenced data. tx hash: %s", sha256.Sum256(tx.GetSequencedData())) - } - - return ethTx, nil - } -} - // `validateStaticExecuteBlockRequest` validates the given execute block request without regard // to the current state of the system. This is useful for validating the request before any // state changes or reads are made as a basic guard. @@ -122,17 +19,6 @@ func validateStaticExecuteBlockRequest(req *astriaPb.ExecuteBlockRequest) error return nil } -func validateStaticExecuteOptimisticBlockRequest(req *optimsticPb.BaseBlock) error { - if req.Timestamp == nil { - return fmt.Errorf("Timestamp cannot be nil") - } - if len(req.SequencerBlockHash) == 0 { - return fmt.Errorf("SequencerBlockHash cannot be empty") - } - - return nil -} - // `validateStaticCommitment` validates the given commitment without regard to the current state of the system. 
func validateStaticCommitmentState(commitmentState *astriaPb.CommitmentState) error { if commitmentState == nil { diff --git a/grpc/execution/mock_grpc_stream.go b/grpc/optimistic/mock_grpc_stream.go similarity index 99% rename from grpc/execution/mock_grpc_stream.go rename to grpc/optimistic/mock_grpc_stream.go index 6ab6ce78a..1696ff8be 100644 --- a/grpc/execution/mock_grpc_stream.go +++ b/grpc/optimistic/mock_grpc_stream.go @@ -1,4 +1,4 @@ -package execution +package optimistic import ( "context" diff --git a/grpc/optimistic/server.go b/grpc/optimistic/server.go new file mode 100644 index 000000000..d48a5ed1e --- /dev/null +++ b/grpc/optimistic/server.go @@ -0,0 +1,292 @@ +package optimistic + +import ( + optimisticGrpc "buf.build/gen/go/astria/execution-apis/grpc/go/astria/bundle/v1alpha1/bundlev1alpha1grpc" + optimsticPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/bundle/v1alpha1" + astriaPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/execution/v1alpha2" + "context" + "errors" + "fmt" + "github.com/ethereum/go-ethereum/beacon/engine" + "github.com/ethereum/go-ethereum/common" + cmath "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/grpc/shared" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/miner" + "github.com/ethereum/go-ethereum/params" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/timestamppb" + "io" + "math/big" + "sync" + "sync/atomic" + "time" +) + +type OptimisticServiceV1Alpha1 struct { + optimisticGrpc.UnimplementedOptimisticExecutionServiceServer + optimisticGrpc.UnimplementedBundleServiceServer + + sharedServiceContainer *shared.SharedServiceContainer + + currentOptimisticSequencerBlock atomic.Pointer[[]byte] +} + +var ( + executeOptimisticBlockRequestCount = metrics.GetOrRegisterCounter("astria/optimistic/execute_optimistic_block_requests", nil) + executeOptimisticBlockSuccessCount = metrics.GetOrRegisterCounter("astria/optimistic/execute_optimistic_block_success", nil) + + executionOptimisticBlockTimer = metrics.GetOrRegisterTimer("astria/optimistic/execute_optimistic_block_time", nil) +) + +func NewOptimisticServiceV1Alpha(sharedServiceContainer *shared.SharedServiceContainer) *OptimisticServiceV1Alpha1 { + optimisticService := &OptimisticServiceV1Alpha1{ + sharedServiceContainer: sharedServiceContainer, + } + + optimisticService.currentOptimisticSequencerBlock.Store(&[]byte{}) + + return optimisticService +} + +func (o *OptimisticServiceV1Alpha1) StreamBundles(_ *optimsticPb.StreamBundlesRequest, stream optimisticGrpc.BundleService_StreamBundlesServer) error { + pendingTxEventCh := make(chan core.NewTxsEvent) + pendingTxEvent := o.Eth().TxPool().SubscribeTransactions(pendingTxEventCh, false) + defer pendingTxEvent.Unsubscribe() + + for { + select { + case pendingTxs := <-pendingTxEventCh: + // get the optimistic block + // this is an in-memory read, so there shouldn't be a lot of concerns on speed + optimisticBlock := o.Eth().BlockChain().CurrentOptimisticBlock() + + for _, pendingTx := range pendingTxs.Txs { + bundle := optimsticPb.Bundle{} + + totalCost := big.NewInt(0) + effectiveTip := cmath.BigMin(pendingTx.GasTipCap(), new(big.Int).Sub(pendingTx.GasFeeCap(), optimisticBlock.BaseFee)) + totalCost.Add(totalCost, effectiveTip) + + 
marshalledTxs := [][]byte{} + marshalledTx, err := pendingTx.MarshalBinary() + if err != nil { + return status.Errorf(codes.Internal, "error marshalling tx: %v", err) + } + marshalledTxs = append(marshalledTxs, marshalledTx) + + bundle.Fee = totalCost.Uint64() + bundle.Transactions = marshalledTxs + bundle.BaseSequencerBlockHash = *o.currentOptimisticSequencerBlock.Load() + bundle.PrevRollupBlockHash = optimisticBlock.Hash().Bytes() + + err = stream.Send(&bundle) + if err != nil { + return status.Errorf(codes.Internal, "error sending bundle over stream: %v", err) + } + } + + case err := <-pendingTxEvent.Err(): + return status.Errorf(codes.Internal, "error waiting for pending transactions: %v", err) + } + } +} + +func (s *OptimisticServiceV1Alpha1) StreamExecuteOptimisticBlock(stream optimisticGrpc.OptimisticExecutionService_StreamExecuteOptimisticBlockServer) error { + mempoolClearingEventCh := make(chan core.NewMempoolCleared) + mempoolClearingEvent := s.Eth().TxPool().SubscribeMempoolClearance(mempoolClearingEventCh) + defer mempoolClearingEvent.Unsubscribe() + + for { + msg, err := stream.Recv() + // stream has been closed + if errors.Is(err, io.EOF) { + return nil + } + if err != nil { + return err + } + + baseBlock := msg.GetBlock() + + // execute the optimistic block and wait for the mempool clearing event + optimisticBlock, err := s.ExecuteOptimisticBlock(stream.Context(), baseBlock) + if err != nil { + return status.Error(codes.Internal, "failed to execute optimistic block") + } + optimisticBlockHash := common.BytesToHash(optimisticBlock.Hash) + + // listen to the mempool clearing event and send the response back to the auctioneer when the mempool is cleared + select { + case event := <-mempoolClearingEventCh: + if event.NewHead.Hash() != optimisticBlockHash { + return status.Error(codes.Internal, "failed to clear mempool after optimistic block execution") + } + s.currentOptimisticSequencerBlock.Store(&baseBlock.SequencerBlockHash) + err = stream.Send(&optimsticPb.StreamExecuteOptimisticBlockResponse{ + Block: optimisticBlock, + BaseSequencerBlockHash: baseBlock.SequencerBlockHash, + }) + case <-time.After(500 * time.Millisecond): + return status.Error(codes.DeadlineExceeded, "timed out waiting for mempool to clear after optimistic block execution") + case err := <-mempoolClearingEvent.Err(): + return status.Errorf(codes.Internal, "error waiting for mempool clearing event: %v", err) + } + } +} + +func (s *OptimisticServiceV1Alpha1) ExecuteOptimisticBlock(ctx context.Context, req *optimsticPb.BaseBlock) (*astriaPb.Block, error) { + // we need to execute the optimistic block + log.Debug("ExecuteOptimisticBlock called", "timestamp", req.Timestamp, "sequencer_block_hash", req.SequencerBlockHash) + executeOptimisticBlockRequestCount.Inc(1) + + if err := validateStaticExecuteOptimisticBlockRequest(req); err != nil { + log.Error("ExecuteOptimisticBlock called with invalid BaseBlock", "err", err) + return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("BaseBlock is invalid: %s", err.Error())) + } + + if !s.SyncMethodsCalled() { + return nil, status.Error(codes.PermissionDenied, "Cannot execute block until GetGenesisInfo && GetCommitmentState methods are called") + } + + // Deliberately called after lock, to more directly measure the time spent executing + executionStart := time.Now() + defer executionOptimisticBlockTimer.UpdateSince(executionStart) + + s.CommitmentUpdateLock().Lock() + // get the soft block + softBlock := s.Bc().CurrentSafeBlock() + s.CommitmentUpdateLock().Unlock() 
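+
+	// read the next fee recipient under the block execution lock so this read
+	// cannot race ExecuteBlock, which updates the recipient while holding the
+	// same shared lock.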
+	s.BlockExecutionLock().Lock()
+	nextFeeRecipient := s.NextFeeRecipient()
+	s.BlockExecutionLock().Unlock()
+
+	// the height that this block will be at
+	height := s.Bc().CurrentBlock().Number.Uint64() + 1
+
+	txsToProcess := types.Transactions{}
+	for _, tx := range req.Transactions {
+		unmarshalledTx, err := shared.ValidateAndUnmarshalSequencerTx(height, tx, s.BridgeAddresses(), s.BridgeAllowedAssets(), s.BridgeSenderAddress())
+		if err != nil {
+			log.Debug("failed to validate sequencer tx, ignoring", "tx", tx, "err", err)
+			continue
+		}
+
+		err = s.Eth().TxPool().ValidateTx(unmarshalledTx)
+		if err != nil {
+			log.Debug("failed to validate tx, ignoring", "tx", tx, "err", err)
+			continue
+		}
+
+		txsToProcess = append(txsToProcess, unmarshalledTx)
+	}
+
+	// Build a payload to add to the chain
+	payloadAttributes := &miner.BuildPayloadArgs{
+		Parent:                softBlock.Hash(),
+		Timestamp:             uint64(req.GetTimestamp().GetSeconds()),
+		Random:                common.Hash{},
+		FeeRecipient:          nextFeeRecipient,
+		OverrideTransactions:  txsToProcess,
+		IsOptimisticExecution: true,
+	}
+	payload, err := s.Eth().Miner().BuildPayload(payloadAttributes)
+	if err != nil {
+		log.Error("failed to build payload", "err", err)
+		return nil, status.Error(codes.InvalidArgument, "Could not build block with provided txs")
+	}
+
+	block, err := engine.ExecutableDataToBlock(*payload.Resolve().ExecutionPayload, nil, nil)
+	if err != nil {
+		log.Error("failed to convert executable data to block", err)
+		return nil, status.Error(codes.Internal, "failed to execute block")
+	}
+
+	// this will insert the optimistic block into the chain and persist its state without
+	// setting it as the HEAD.
+	err = s.Bc().InsertBlockWithoutSetHead(block)
+	if err != nil {
+		log.Error("failed to insert block to chain", "hash", block.Hash(), "prevHash", block.ParentHash(), "err", err)
+		return nil, status.Error(codes.Internal, "failed to insert block to chain")
+	}
+
+	// we store a pointer to the optimistic block in the chain so that we can use it
+	// to retrieve the state of the optimistic block
+	s.Bc().SetOptimistic(block)
+
+	res := &astriaPb.Block{
+		Number:          uint32(block.NumberU64()),
+		Hash:            block.Hash().Bytes(),
+		ParentBlockHash: block.ParentHash().Bytes(),
+		Timestamp: &timestamppb.Timestamp{
+			Seconds: int64(block.Time()),
+		},
+	}
+
+	log.Info("ExecuteOptimisticBlock completed", "block_num", res.Number, "timestamp", res.Timestamp)
+	executeOptimisticBlockSuccessCount.Inc(1)
+
+	return res, nil
+}
+
+func (o *OptimisticServiceV1Alpha1) Eth() *eth.Ethereum {
+	return o.sharedServiceContainer.Eth()
+}
+
+func (o *OptimisticServiceV1Alpha1) Bc() *core.BlockChain {
+	return o.sharedServiceContainer.Bc()
+}
+
+func (o *OptimisticServiceV1Alpha1) SetGenesisInfoCalled(value bool) {
+	o.sharedServiceContainer.SetGenesisInfoCalled(value)
+}
+
+func (o *OptimisticServiceV1Alpha1) GenesisInfoCalled() bool {
+	return o.sharedServiceContainer.GenesisInfoCalled()
+}
+
+func (o *OptimisticServiceV1Alpha1) SetGetCommitmentStateCalled(value bool) {
+	o.sharedServiceContainer.SetGetCommitmentStateCalled(value)
+}
+
+func (o *OptimisticServiceV1Alpha1) CommitmentStateCalled() bool {
+	return o.sharedServiceContainer.CommitmentStateCalled()
+}
+
+func (o *OptimisticServiceV1Alpha1) CommitmentUpdateLock() *sync.Mutex {
+	return o.sharedServiceContainer.CommitmentUpdateLock()
+}
+
+func (o *OptimisticServiceV1Alpha1) BlockExecutionLock() *sync.Mutex {
+	return o.sharedServiceContainer.BlockExecutionLock()
+}
+
+func (o *OptimisticServiceV1Alpha1) NextFeeRecipient() 
common.Address { + return o.sharedServiceContainer.NextFeeRecipient() +} + +func (o *OptimisticServiceV1Alpha1) SetNextFeeRecipient(feeRecipient common.Address) { + o.sharedServiceContainer.SetNextFeeRecipient(feeRecipient) +} + +func (s *OptimisticServiceV1Alpha1) BridgeAddresses() map[string]*params.AstriaBridgeAddressConfig { + return s.sharedServiceContainer.BridgeAddresses() +} + +func (s *OptimisticServiceV1Alpha1) BridgeAllowedAssets() map[string]struct{} { + return s.sharedServiceContainer.BridgeAllowedAssets() +} + +func (s *OptimisticServiceV1Alpha1) BridgeSenderAddress() common.Address { + return s.sharedServiceContainer.BridgeSenderAddress() +} + +func (s *OptimisticServiceV1Alpha1) SyncMethodsCalled() bool { + return s.sharedServiceContainer.SyncMethodsCalled() +} diff --git a/grpc/optimistic/server_test.go b/grpc/optimistic/server_test.go new file mode 100644 index 000000000..66a279590 --- /dev/null +++ b/grpc/optimistic/server_test.go @@ -0,0 +1,439 @@ +package optimistic + +import ( + optimsticPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/bundle/v1alpha1" + astriaPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/execution/v1alpha2" + primitivev1 "buf.build/gen/go/astria/primitives/protocolbuffers/go/astria/primitive/v1" + sequencerblockv1alpha1 "buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go/astria/sequencerblock/v1alpha1" + "bytes" + "context" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/txpool" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/grpc/execution" + "github.com/ethereum/go-ethereum/grpc/shared" + "github.com/ethereum/go-ethereum/params" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/timestamppb" + "math/big" + "testing" + "time" +) + +func bigIntToProtoU128(i *big.Int) *primitivev1.Uint128 { + lo := i.Uint64() + hi := new(big.Int).Rsh(i, 64).Uint64() + return &primitivev1.Uint128{Lo: lo, Hi: hi} +} + +func TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlock(t *testing.T) { + ethService, _ := shared.SetupSharedService(t, 10) + + tests := []struct { + description string + callGenesisInfoAndGetCommitmentState bool + numberOfTxs int + prevBlockHash []byte + timestamp uint64 + depositTxAmount *big.Int // if this is non zero then we send a deposit tx + expectedReturnCode codes.Code + }{ + { + description: "ExecuteOptimisticBlock without calling GetGenesisInfo and GetCommitmentState", + callGenesisInfoAndGetCommitmentState: false, + numberOfTxs: 5, + prevBlockHash: ethService.BlockChain().GetBlockByNumber(2).Hash().Bytes(), + timestamp: ethService.BlockChain().GetBlockByNumber(2).Time() + 2, + depositTxAmount: big.NewInt(0), + expectedReturnCode: codes.PermissionDenied, + }, + { + description: "ExecuteOptimisticBlock with 5 txs and no deposit tx", + callGenesisInfoAndGetCommitmentState: true, + numberOfTxs: 5, + prevBlockHash: ethService.BlockChain().CurrentSafeBlock().Hash().Bytes(), + timestamp: ethService.BlockChain().CurrentSafeBlock().Time + 2, + depositTxAmount: big.NewInt(0), + expectedReturnCode: 0, + }, + { + description: "ExecuteOptimisticBlock with 5 txs and a deposit tx", + callGenesisInfoAndGetCommitmentState: true, + numberOfTxs: 5, + prevBlockHash: ethService.BlockChain().CurrentSafeBlock().Hash().Bytes(), + timestamp: 
ethService.BlockChain().CurrentSafeBlock().Time + 2, + depositTxAmount: big.NewInt(1000000000000000000), + expectedReturnCode: 0, + }, + } + + for _, tt := range tests { + t.Run(tt.description, func(t *testing.T) { + ethservice, sharedService := shared.SetupSharedService(t, 10) + + // reset the blockchain with each test + optimisticServiceV1Alpha1 := SetupOptimisticService(t, sharedService) + executionServiceV1Alpha1 := execution.SetupExecutionService(t, sharedService) + + var err error // adding this to prevent shadowing of genesisInfo in the below if branch + var genesisInfo *astriaPb.GenesisInfo + var commitmentStateBeforeExecuteBlock *astriaPb.CommitmentState + if tt.callGenesisInfoAndGetCommitmentState { + // call getGenesisInfo and getCommitmentState before calling executeBlock + genesisInfo, err = executionServiceV1Alpha1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{}) + require.Nil(t, err, "GetGenesisInfo failed") + require.NotNil(t, genesisInfo, "GenesisInfo is nil") + + commitmentStateBeforeExecuteBlock, err = executionServiceV1Alpha1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) + require.Nil(t, err, "GetCommitmentState failed") + require.NotNil(t, commitmentStateBeforeExecuteBlock, "CommitmentState is nil") + } + + // create the txs to send + // create 5 txs + txs := []*types.Transaction{} + marshalledTxs := []*sequencerblockv1alpha1.RollupData{} + for i := 0; i < 5; i++ { + unsignedTx := types.NewTransaction(uint64(i), shared.TestToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil) + tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), shared.TestKey) + require.Nil(t, err, "Failed to sign tx") + txs = append(txs, tx) + + marshalledTx, err := tx.MarshalBinary() + require.Nil(t, err, "Failed to marshal tx") + marshalledTxs = append(marshalledTxs, &sequencerblockv1alpha1.RollupData{ + Value: &sequencerblockv1alpha1.RollupData_SequencedData{SequencedData: marshalledTx}, + }) + } + + // create deposit tx if depositTxAmount is non zero + if tt.depositTxAmount.Cmp(big.NewInt(0)) != 0 { + depositAmount := bigIntToProtoU128(tt.depositTxAmount) + bridgeAddress := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].BridgeAddress + bridgeAssetDenom := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].AssetDenom + + // create new chain destination address for better testing + chainDestinationAddressPrivKey, err := crypto.GenerateKey() + require.Nil(t, err, "Failed to generate chain destination address") + + chainDestinationAddress := crypto.PubkeyToAddress(chainDestinationAddressPrivKey.PublicKey) + + depositTx := &sequencerblockv1alpha1.RollupData{Value: &sequencerblockv1alpha1.RollupData_Deposit{Deposit: &sequencerblockv1alpha1.Deposit{ + BridgeAddress: &primitivev1.Address{ + Bech32M: bridgeAddress, + }, + Asset: bridgeAssetDenom, + Amount: depositAmount, + RollupId: genesisInfo.RollupId, + DestinationChainAddress: chainDestinationAddress.String(), + SourceTransactionId: &primitivev1.TransactionId{ + Inner: "test_tx_hash", + }, + SourceActionIndex: 0, + }}} + + marshalledTxs = append(marshalledTxs, depositTx) + } + + optimisticHeadCh := make(chan core.ChainOptimisticHeadEvent, 1) + optimsticHeadSub := ethservice.BlockChain().SubscribeChainOptimisticHeadEvent(optimisticHeadCh) + defer optimsticHeadSub.Unsubscribe() + + baseBlockReq := &optimsticPb.BaseBlock{ + Timestamp: ×tamppb.Timestamp{ + Seconds: int64(tt.timestamp), + }, + Transactions: 
marshalledTxs, + SequencerBlockHash: []byte("test_hash"), + } + + res, err := optimisticServiceV1Alpha1.ExecuteOptimisticBlock(context.Background(), baseBlockReq) + if tt.expectedReturnCode > 0 { + require.NotNil(t, err, "ExecuteOptimisticBlock should return an error") + require.Equal(t, tt.expectedReturnCode, status.Code(err), "ExecuteOptimisticBlock failed") + } else { + require.Nil(t, err, "ExecuteOptimisticBlock failed") + } + if err == nil { + require.NotNil(t, res, "ExecuteOptimisticBlock response is nil") + + astriaOrdered := ethservice.TxPool().AstriaOrdered() + require.Equal(t, 0, astriaOrdered.Len(), "AstriaOrdered should be empty") + + // check if commitment state is not updated + commitmentStateAfterExecuteBlock, err := executionServiceV1Alpha1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) + require.Nil(t, err, "GetCommitmentState failed") + + require.Exactly(t, commitmentStateBeforeExecuteBlock, commitmentStateAfterExecuteBlock, "Commitment state should not be updated") + + // check if the optimistic block is set + optimisticBlock := ethservice.BlockChain().CurrentOptimisticBlock() + require.NotNil(t, optimisticBlock, "Optimistic block is not set") + + // check if the optimistic block is correct + require.Equal(t, common.BytesToHash(res.Hash), optimisticBlock.Hash(), "Optimistic block hashes do not match") + require.Equal(t, common.BytesToHash(res.ParentBlockHash), optimisticBlock.ParentHash, "Optimistic block parent hashes do not match") + require.Equal(t, uint64(res.Number), optimisticBlock.Number.Uint64(), "Optimistic block numbers do not match") + + // check if optimistic block is inserted into chain + block := ethservice.BlockChain().GetBlockByHash(optimisticBlock.Hash()) + require.NotNil(t, block, "Optimistic block not found in blockchain") + require.Equal(t, uint64(res.Number), block.NumberU64(), "Block number is not correct") + + // timeout for optimistic head event + select { + case blockEvent := <-optimisticHeadCh: + require.NotNil(t, blockEvent, "Optimistic head event not received") + require.Equal(t, block.Hash(), blockEvent.Block.Hash(), "Optimistic head event block hash is not correct") + require.Equal(t, block.NumberU64(), blockEvent.Block.NumberU64(), "Optimistic head event block number is not correct") + case <-time.After(2 * time.Second): + require.FailNow(t, "Optimistic head event not received") + case err := <-optimsticHeadSub.Err(): + require.Nil(t, err, "Optimistic head event subscription failed") + } + } + }) + } +} + +func TestNewExecutionServiceServerV1Alpha2_StreamBundles(t *testing.T) { + ethservice, sharedService := shared.SetupSharedService(t, 10) + + optimisticServiceV1Alpha1 := SetupOptimisticService(t, sharedService) + executionServiceV1Alpha1 := execution.SetupExecutionService(t, sharedService) + + // call genesis info + genesisInfo, err := executionServiceV1Alpha1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{}) + require.Nil(t, err, "GetGenesisInfo failed") + require.NotNil(t, genesisInfo, "GenesisInfo is nil") + + // call get commitment state + commitmentState, err := executionServiceV1Alpha1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) + require.Nil(t, err, "GetCommitmentState failed") + require.NotNil(t, commitmentState, "CommitmentState is nil") + + // get previous block hash + previousBlock := ethservice.BlockChain().CurrentSafeBlock() + require.NotNil(t, previousBlock, "Previous block not found") + + // create the optimistic block via the 
StreamExecuteOptimisticBlock rpc + requestStreams := []*optimsticPb.StreamExecuteOptimisticBlockRequest{} + sequencerBlockHash := []byte("sequencer_block_hash") + + // create 1 stream item with 5 txs + txs := []*types.Transaction{} + marshalledTxs := []*sequencerblockv1alpha1.RollupData{} + for i := 0; i < 5; i++ { + unsignedTx := types.NewTransaction(uint64(i), shared.TestToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil) + tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), shared.TestKey) + require.Nil(t, err, "Failed to sign tx") + txs = append(txs, tx) + + marshalledTx, err := tx.MarshalBinary() + require.Nil(t, err, "Failed to marshal tx") + marshalledTxs = append(marshalledTxs, &sequencerblockv1alpha1.RollupData{ + Value: &sequencerblockv1alpha1.RollupData_SequencedData{SequencedData: marshalledTx}, + }) + } + + req := optimsticPb.StreamExecuteOptimisticBlockRequest{Block: &optimsticPb.BaseBlock{ + SequencerBlockHash: sequencerBlockHash, + Transactions: marshalledTxs, + Timestamp: ×tamppb.Timestamp{ + Seconds: int64(previousBlock.Time + 2), + }, + }} + + requestStreams = append(requestStreams, &req) + + mockBidirectionalStream := &MockBidirectionalStreaming[optimsticPb.StreamExecuteOptimisticBlockRequest, optimsticPb.StreamExecuteOptimisticBlockResponse]{ + requestStream: requestStreams, + accumulatedResponses: []*optimsticPb.StreamExecuteOptimisticBlockResponse{}, + requestCounter: 0, + } + + errorCh := make(chan error) + go func(errorCh chan error) { + errorCh <- optimisticServiceV1Alpha1.StreamExecuteOptimisticBlock(mockBidirectionalStream) + }(errorCh) + + select { + // stream either errors out of gets closed + case err := <-errorCh: + require.Nil(t, err, "StreamExecuteOptimisticBlock failed") + } + + require.Len(t, mockBidirectionalStream.accumulatedResponses, 1, "Number of responses should match the number of requests") + accumulatedResponse := mockBidirectionalStream.accumulatedResponses[0] + + currentOptimisticBlock := ethservice.BlockChain().CurrentOptimisticBlock() + require.NotNil(t, currentOptimisticBlock, "Optimistic block is not set") + require.True(t, bytes.Equal(accumulatedResponse.GetBlock().Hash, currentOptimisticBlock.Hash().Bytes()), "Optimistic block hashes do not match") + require.True(t, bytes.Equal(accumulatedResponse.GetBlock().ParentBlockHash, currentOptimisticBlock.ParentHash.Bytes()), "Optimistic block parent hashes do not match") + require.Equal(t, uint64(accumulatedResponse.GetBlock().Number), currentOptimisticBlock.Number.Uint64(), "Optimistic block numbers do not match") + + // assert mempool is cleared + astriaOrdered := ethservice.TxPool().AstriaOrdered() + require.Equal(t, 0, astriaOrdered.Len(), "AstriaOrdered should be empty") + + pendingTxs := ethservice.TxPool().Pending(txpool.PendingFilter{ + OnlyPlainTxs: true, + }) + require.Equal(t, len(pendingTxs), 0, "Mempool should be empty") + + mockServerSideStreaming := MockServerSideStreaming[optimsticPb.Bundle]{ + sentResponses: []*optimsticPb.Bundle{}, + } + + errorCh = make(chan error) + go func() { + errorCh <- optimisticServiceV1Alpha1.StreamBundles(&optimsticPb.StreamBundlesRequest{}, &mockServerSideStreaming) + }() + + // optimistic block is created, we can now add txs and check if they get streamed + // create 5 txs + txs = []*types.Transaction{} + for i := 5; i < 10; i++ { + unsignedTx := types.NewTransaction(uint64(i), shared.TestToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil) + tx, err := 
types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), shared.TestKey) + require.Nil(t, err, "Failed to sign tx") + txs = append(txs, tx) + + marshalledTx, err := tx.MarshalBinary() + require.Nil(t, err, "Failed to marshal tx") + marshalledTxs = append(marshalledTxs, &sequencerblockv1alpha1.RollupData{ + Value: &sequencerblockv1alpha1.RollupData_SequencedData{SequencedData: marshalledTx}, + }) + } + + txErrors := ethservice.TxPool().Add(txs, true, false) + for _, txErr := range txErrors { + require.Nil(t, txErr, "Failed to add tx to mempool") + } + + pending, queued := ethservice.TxPool().Stats() + require.Equal(t, pending, 5, "Mempool should have 5 pending txs") + require.Equal(t, queued, 0, "Mempool should have 0 queued txs") + + // give some time for the txs to stream + time.Sleep(5 * time.Second) + + // close the mempool to error the method out + err = ethservice.TxPool().Close() + require.Nil(t, err, "Failed to close mempool") + + select { + case err := <-errorCh: + require.ErrorContains(t, err, "error waiting for pending transactions") + } + + require.Len(t, mockServerSideStreaming.sentResponses, 5, "Number of responses should match the number of requests") + + for _, resp := range mockServerSideStreaming.sentResponses { + require.Len(t, resp.Transactions, 1, "Bundle should have 1 tx") + require.True(t, bytes.Equal(resp.PrevRollupBlockHash, currentOptimisticBlock.Hash().Bytes()), "PrevRollupBlockHash should match the current optimistic block hash") + require.True(t, bytes.Equal(resp.BaseSequencerBlockHash, *optimisticServiceV1Alpha1.currentOptimisticSequencerBlock.Load()), "BaseSequencerBlockHash should match the current optimistic sequencer block hash") + } +} + +func TestExecutionServiceServerV1Alpha2_StreamExecuteOptimisticBlock(t *testing.T) { + ethservice, sharedService := shared.SetupSharedService(t, 10) + + optimisticServiceV1Alpha1 := SetupOptimisticService(t, sharedService) + executionServiceV1Alpha1 := execution.SetupExecutionService(t, sharedService) + + // call genesis info + genesisInfo, err := executionServiceV1Alpha1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{}) + require.Nil(t, err, "GetGenesisInfo failed") + require.NotNil(t, genesisInfo, "GenesisInfo is nil") + + // call get commitment state + commitmentState, err := executionServiceV1Alpha1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) + require.Nil(t, err, "GetCommitmentState failed") + require.NotNil(t, commitmentState, "CommitmentState is nil") + + // get previous block hash + previousBlock := ethservice.BlockChain().CurrentSafeBlock() + require.NotNil(t, previousBlock, "Previous block not found") + + requestStreams := []*optimsticPb.StreamExecuteOptimisticBlockRequest{} + sequencerBlockHash := []byte("sequencer_block_hash") + + // create 1 stream item with 5 txs + txs := []*types.Transaction{} + marshalledTxs := []*sequencerblockv1alpha1.RollupData{} + for i := 0; i < 5; i++ { + unsignedTx := types.NewTransaction(uint64(i), shared.TestToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil) + tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), shared.TestKey) + require.Nil(t, err, "Failed to sign tx") + txs = append(txs, tx) + + marshalledTx, err := tx.MarshalBinary() + require.Nil(t, err, "Failed to marshal tx") + marshalledTxs = append(marshalledTxs, &sequencerblockv1alpha1.RollupData{ + Value: &sequencerblockv1alpha1.RollupData_SequencedData{SequencedData: 
marshalledTx}, + }) + } + + errs := ethservice.TxPool().Add(txs, true, false) + for _, err := range errs { + require.Nil(t, err, "Failed to add tx to mempool") + } + + pending, queued := ethservice.TxPool().Stats() + require.Equal(t, pending, 5, "Mempool should have 5 pending txs") + require.Equal(t, queued, 0, "Mempool should have 0 queued txs") + + req := optimsticPb.StreamExecuteOptimisticBlockRequest{Block: &optimsticPb.BaseBlock{ + SequencerBlockHash: sequencerBlockHash, + Transactions: marshalledTxs, + Timestamp: ×tamppb.Timestamp{ + Seconds: int64(previousBlock.Time + 2), + }, + }} + + requestStreams = append(requestStreams, &req) + + mockStream := &MockBidirectionalStreaming[optimsticPb.StreamExecuteOptimisticBlockRequest, optimsticPb.StreamExecuteOptimisticBlockResponse]{ + requestStream: requestStreams, + accumulatedResponses: []*optimsticPb.StreamExecuteOptimisticBlockResponse{}, + requestCounter: 0, + } + + errorCh := make(chan error) + go func(errorCh chan error) { + errorCh <- optimisticServiceV1Alpha1.StreamExecuteOptimisticBlock(mockStream) + }(errorCh) + + select { + // the stream will either errors out or gets closed + case err := <-errorCh: + require.Nil(t, err, "StreamExecuteOptimisticBlock failed") + } + + accumulatedResponses := mockStream.accumulatedResponses + + require.Equal(t, len(accumulatedResponses), len(mockStream.requestStream), "Number of responses should match the number of requests") + + blockCounter := 1 + for _, response := range accumulatedResponses { + require.True(t, bytes.Equal(response.GetBaseSequencerBlockHash(), sequencerBlockHash), "Sequencer block hash does not match") + block := response.GetBlock() + require.True(t, bytes.Equal(block.ParentBlockHash, previousBlock.Hash().Bytes()), "Parent block hash does not match") + requiredBlockNumber := big.NewInt(0).Add(previousBlock.Number, big.NewInt(int64(blockCounter))) + require.Equal(t, requiredBlockNumber.Uint64(), uint64(block.Number), "Block number is not correct") + blockCounter += 1 + } + + // ensure mempool is cleared + astriaOrdered := ethservice.TxPool().AstriaOrdered() + require.Equal(t, 0, astriaOrdered.Len(), "AstriaOrdered should be empty") + + pending, queued = ethservice.TxPool().Stats() + require.Equal(t, pending, 0, "Mempool should have 0 pending txs") + require.Equal(t, queued, 0, "Mempool should have 0 queued txs") +} diff --git a/grpc/optimistic/test_setup.go b/grpc/optimistic/test_setup.go new file mode 100644 index 000000000..444132739 --- /dev/null +++ b/grpc/optimistic/test_setup.go @@ -0,0 +1,12 @@ +package optimistic + +import ( + "github.com/ethereum/go-ethereum/grpc/shared" + "testing" +) + +func SetupOptimisticService(t *testing.T, sharedService *shared.SharedServiceContainer) *OptimisticServiceV1Alpha1 { + t.Helper() + + return NewOptimisticServiceV1Alpha(sharedService) +} diff --git a/grpc/optimistic/validation.go b/grpc/optimistic/validation.go new file mode 100644 index 000000000..a59420d73 --- /dev/null +++ b/grpc/optimistic/validation.go @@ -0,0 +1,17 @@ +package optimistic + +import ( + optimsticPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/bundle/v1alpha1" + "fmt" +) + +func validateStaticExecuteOptimisticBlockRequest(req *optimsticPb.BaseBlock) error { + if req.Timestamp == nil { + return fmt.Errorf("Timestamp cannot be nil") + } + if len(req.SequencerBlockHash) == 0 { + return fmt.Errorf("SequencerBlockHash cannot be empty") + } + + return nil +} diff --git a/grpc/shared/container.go b/grpc/shared/container.go new file mode 100644 index 
000000000..253e1b3c1 --- /dev/null +++ b/grpc/shared/container.go @@ -0,0 +1,166 @@ +package shared + +import ( + "errors" + "fmt" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/eth" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" + "sync" +) + +type SharedServiceContainer struct { + eth *eth.Ethereum + bc *core.BlockChain + + commitmentUpdateLock sync.Mutex // Lock for the forkChoiceUpdated method + blockExecutionLock sync.Mutex // Lock for the NewPayload method + + genesisInfoCalled bool + getCommitmentStateCalled bool + + bridgeAddresses map[string]*params.AstriaBridgeAddressConfig // astria bridge addess to config for that bridge account + bridgeAllowedAssets map[string]struct{} // a set of allowed asset IDs structs are left empty + bridgeSenderAddress common.Address // address from which AstriaBridgeableERC20 contracts are called + + // TODO: bharath - we could make this an atomic pointer??? + nextFeeRecipient common.Address // Fee recipient for the next block +} + +func NewSharedServiceContainer(eth *eth.Ethereum) (*SharedServiceContainer, error) { + bc := eth.BlockChain() + + if bc.Config().AstriaRollupName == "" { + return nil, errors.New("rollup name not set") + } + + if bc.Config().AstriaSequencerInitialHeight == 0 { + return nil, errors.New("sequencer initial height not set") + } + + if bc.Config().AstriaCelestiaInitialHeight == 0 { + return nil, errors.New("celestia initial height not set") + } + + if bc.Config().AstriaCelestiaHeightVariance == 0 { + return nil, errors.New("celestia height variance not set") + } + + bridgeAddresses := make(map[string]*params.AstriaBridgeAddressConfig) + bridgeAllowedAssets := make(map[string]struct{}) + if bc.Config().AstriaBridgeAddressConfigs == nil { + log.Warn("bridge addresses not set") + } else { + nativeBridgeSeen := false + for _, cfg := range bc.Config().AstriaBridgeAddressConfigs { + err := cfg.Validate(bc.Config().AstriaSequencerAddressPrefix) + if err != nil { + return nil, fmt.Errorf("invalid bridge address config: %w", err) + } + + if cfg.Erc20Asset == nil { + if nativeBridgeSeen { + return nil, errors.New("only one native bridge address is allowed") + } + nativeBridgeSeen = true + } + + if cfg.Erc20Asset != nil && bc.Config().AstriaBridgeSenderAddress == (common.Address{}) { + return nil, errors.New("astria bridge sender address must be set for bridged ERC20 assets") + } + + bridgeAddresses[cfg.BridgeAddress] = &cfg + bridgeAllowedAssets[cfg.AssetDenom] = struct{}{} + if cfg.Erc20Asset == nil { + log.Info("bridge for sequencer native asset initialized", "bridgeAddress", cfg.BridgeAddress, "assetDenom", cfg.AssetDenom) + } else { + log.Info("bridge for ERC20 asset initialized", "bridgeAddress", cfg.BridgeAddress, "assetDenom", cfg.AssetDenom, "contractAddress", cfg.Erc20Asset.ContractAddress) + } + } + } + + // To decrease compute cost, we identify the next fee recipient at the start + // and update it as we execute blocks. 
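As the comment above notes and the scan that follows shows, the next fee recipient is the collector configured at the greatest height that does not exceed the next block. A reduced sketch of that selection rule, assuming plain strings in place of common.Address:

package main

import "fmt"

// selectFeeRecipient picks the collector configured at the greatest height that
// does not exceed nextBlock, mirroring the AstriaFeeCollectors scan below.
func selectFeeRecipient(collectors map[uint32]string, nextBlock uint32) string {
	bestHeight := uint32(0)
	recipient := ""
	for height, collector := range collectors {
		if height <= nextBlock && height > bestHeight {
			bestHeight = height
			recipient = collector
		}
	}
	return recipient
}

func main() {
	collectors := map[uint32]string{1: "collector-a", 100: "collector-b"}
	fmt.Println(selectFeeRecipient(collectors, 42))  // collector-a
	fmt.Println(selectFeeRecipient(collectors, 150)) // collector-b
}
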
+ nextFeeRecipient := common.Address{} + if bc.Config().AstriaFeeCollectors == nil { + log.Warn("fee asset collectors not set, assets will be burned") + } else { + maxHeightCollectorMatch := uint32(0) + nextBlock := uint32(bc.CurrentBlock().Number.Int64()) + 1 + for height, collector := range bc.Config().AstriaFeeCollectors { + if height <= nextBlock && height > maxHeightCollectorMatch { + maxHeightCollectorMatch = height + nextFeeRecipient = collector + } + } + } + + sharedServiceContainer := &SharedServiceContainer{ + eth: eth, + bc: bc, + bridgeAddresses: bridgeAddresses, + bridgeAllowedAssets: bridgeAllowedAssets, + bridgeSenderAddress: bc.Config().AstriaBridgeSenderAddress, + nextFeeRecipient: nextFeeRecipient, + } + + return sharedServiceContainer, nil +} + +func (s *SharedServiceContainer) SyncMethodsCalled() bool { + return s.genesisInfoCalled && s.getCommitmentStateCalled +} + +func (s *SharedServiceContainer) Bc() *core.BlockChain { + return s.bc +} + +func (s *SharedServiceContainer) Eth() *eth.Ethereum { + return s.eth +} + +func (s *SharedServiceContainer) SetGenesisInfoCalled(value bool) { + s.genesisInfoCalled = value +} + +func (s *SharedServiceContainer) GenesisInfoCalled() bool { + return s.genesisInfoCalled +} + +func (s *SharedServiceContainer) SetGetCommitmentStateCalled(value bool) { + s.getCommitmentStateCalled = value +} + +func (s *SharedServiceContainer) CommitmentStateCalled() bool { + return s.getCommitmentStateCalled +} + +func (s *SharedServiceContainer) CommitmentUpdateLock() *sync.Mutex { + return &s.commitmentUpdateLock +} + +func (s *SharedServiceContainer) BlockExecutionLock() *sync.Mutex { + return &s.blockExecutionLock +} + +func (s *SharedServiceContainer) NextFeeRecipient() common.Address { + return s.nextFeeRecipient +} + +func (s *SharedServiceContainer) SetNextFeeRecipient(nextFeeRecipient common.Address) { + s.nextFeeRecipient = nextFeeRecipient +} + +func (s *SharedServiceContainer) BridgeAddresses() map[string]*params.AstriaBridgeAddressConfig { + return s.bridgeAddresses +} + +func (s *SharedServiceContainer) BridgeAllowedAssets() map[string]struct{} { + return s.bridgeAllowedAssets +} + +func (s *SharedServiceContainer) BridgeSenderAddress() common.Address { + return s.bridgeSenderAddress +} diff --git a/grpc/shared/test_setup.go b/grpc/shared/test_setup.go new file mode 100644 index 000000000..5fb0aec21 --- /dev/null +++ b/grpc/shared/test_setup.go @@ -0,0 +1,32 @@ +package shared + +import ( + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/eth" + "github.com/stretchr/testify/require" + "testing" +) + +func SetupSharedService(t *testing.T, noOfBlocksToGenerate int) (*eth.Ethereum, *SharedServiceContainer) { + t.Helper() + genesis, blocks, bridgeAddress, feeCollectorKey := GenerateMergeChain(noOfBlocksToGenerate, true) + ethservice := StartEthService(t, genesis) + + sharedService, err := NewSharedServiceContainer(ethservice) + require.Nil(t, err, "can't create shared service") + + feeCollector := crypto.PubkeyToAddress(feeCollectorKey.PublicKey) + require.Equal(t, feeCollector, sharedService.NextFeeRecipient(), "nextFeeRecipient not set correctly") + + bridgeAsset := genesis.Config.AstriaBridgeAddressConfigs[0].AssetDenom + _, ok := sharedService.BridgeAllowedAssets()[bridgeAsset] + require.True(t, ok, "bridgeAllowedAssetIDs does not contain bridge asset id") + + _, ok = sharedService.BridgeAddresses()[bridgeAddress] + require.True(t, ok, "bridgeAddress not set correctly") + + _, err = 
ethservice.BlockChain().InsertChain(blocks) + require.Nil(t, err, "can't insert blocks") + + return ethservice, sharedService +} diff --git a/grpc/execution/test_utils.go b/grpc/shared/test_utils.go similarity index 69% rename from grpc/execution/test_utils.go rename to grpc/shared/test_utils.go index dedab1aa5..9782f9832 100644 --- a/grpc/execution/test_utils.go +++ b/grpc/shared/test_utils.go @@ -1,4 +1,4 @@ -package execution +package shared import ( "crypto/ecdsa" @@ -24,18 +24,18 @@ import ( ) var ( - // testKey is a private key to use for funding a tester account. - testKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + // TestKey is a private key to use for funding a tester account. + TestKey, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") // testAddr is the Ethereum address of the tester account. - testAddr = crypto.PubkeyToAddress(testKey.PublicKey) + TestAddr = crypto.PubkeyToAddress(TestKey.PublicKey) - testToAddress = common.HexToAddress("0x9a9070028361F7AAbeB3f2F2Dc07F82C4a98A02a") + TestToAddress = common.HexToAddress("0x9a9070028361F7AAbeB3f2F2Dc07F82C4a98A02a") testBalance = big.NewInt(2e18) ) -func generateMergeChain(n int, merged bool) (*core.Genesis, []*types.Block, string, *ecdsa.PrivateKey) { +func GenerateMergeChain(n int, merged bool) (*core.Genesis, []*types.Block, string, *ecdsa.PrivateKey) { config := *params.AllEthashProtocolChanges engine := consensus.Engine(beaconConsensus.New(ethash.NewFaker())) if merged { @@ -88,7 +88,7 @@ func generateMergeChain(n int, merged bool) (*core.Genesis, []*types.Block, stri genesis := &core.Genesis{ Config: &config, Alloc: core.GenesisAlloc{ - testAddr: {Balance: testBalance}, + TestAddr: {Balance: testBalance}, }, ExtraData: []byte("test genesis"), Timestamp: 9000, @@ -99,7 +99,7 @@ func generateMergeChain(n int, merged bool) (*core.Genesis, []*types.Block, stri generate := func(i int, g *core.BlockGen) { g.OffsetTime(5) g.SetExtra([]byte("test")) - tx, _ := types.SignTx(types.NewTransaction(testNonce, testToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil), types.LatestSigner(&config), testKey) + tx, _ := types.SignTx(types.NewTransaction(testNonce, TestToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil), types.LatestSigner(&config), TestKey) g.AddTx(tx) testNonce++ } @@ -117,11 +117,11 @@ func generateMergeChain(n int, merged bool) (*core.Genesis, []*types.Block, stri } // startEthService creates a full node instance for testing. 
-func startEthService(t *testing.T, genesis *core.Genesis) *eth.Ethereum { +func StartEthService(t *testing.T, genesis *core.Genesis) *eth.Ethereum { n, err := node.New(&node.Config{}) require.Nil(t, err, "can't create node") mcfg := miner.DefaultConfig - mcfg.PendingFeeRecipient = testAddr + mcfg.PendingFeeRecipient = TestAddr ethcfg := ðconfig.Config{Genesis: genesis, SyncMode: downloader.FullSync, TrieTimeout: time.Minute, TrieDirtyCache: 256, TrieCleanCache: 256, Miner: mcfg} ethservice, err := eth.New(n, ethcfg) require.Nil(t, err, "can't create eth service") @@ -129,27 +129,3 @@ func startEthService(t *testing.T, genesis *core.Genesis) *eth.Ethereum { ethservice.SetSynced() return ethservice } - -func setupExecutionService(t *testing.T, noOfBlocksToGenerate int) (*eth.Ethereum, *ExecutionServiceServerV1) { - t.Helper() - genesis, blocks, bridgeAddress, feeCollectorKey := generateMergeChain(noOfBlocksToGenerate, true) - ethservice := startEthService(t, genesis) - - serviceV1Alpha1, err := NewExecutionServiceServerV1(ethservice) - require.Nil(t, err, "can't create execution service") - - feeCollector := crypto.PubkeyToAddress(feeCollectorKey.PublicKey) - require.Equal(t, feeCollector, serviceV1Alpha1.nextFeeRecipient, "nextFeeRecipient not set correctly") - - bridgeAsset := genesis.Config.AstriaBridgeAddressConfigs[0].AssetDenom - _, ok := serviceV1Alpha1.bridgeAllowedAssets[bridgeAsset] - require.True(t, ok, "bridgeAllowedAssetIDs does not contain bridge asset id") - - _, ok = serviceV1Alpha1.bridgeAddresses[bridgeAddress] - require.True(t, ok, "bridgeAddress not set correctly") - - _, err = ethservice.BlockChain().InsertChain(blocks) - require.Nil(t, err, "can't insert blocks") - - return ethservice, serviceV1Alpha1 -} diff --git a/grpc/shared/validation.go b/grpc/shared/validation.go new file mode 100644 index 000000000..ccb0a9961 --- /dev/null +++ b/grpc/shared/validation.go @@ -0,0 +1,114 @@ +package shared + +import ( + primitivev1 "buf.build/gen/go/astria/primitives/protocolbuffers/go/astria/primitive/v1" + sequencerblockv1 "buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go/astria/sequencerblock/v1" + "crypto/sha256" + "fmt" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/contracts" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" + "math/big" +) + +func protoU128ToBigInt(u128 *primitivev1.Uint128) *big.Int { + lo := big.NewInt(0).SetUint64(u128.Lo) + hi := big.NewInt(0).SetUint64(u128.Hi) + hi.Lsh(hi, 64) + return lo.Add(lo, hi) +} + +// `validateAndUnmarshalSequencerTx` validates and unmarshals the given rollup sequencer transaction. +// If the sequencer transaction is a deposit tx, we ensure that the asset ID is allowed and the bridge address is known. +// If the sequencer transaction is not a deposit tx, we unmarshal the sequenced data into an Ethereum transaction. We ensure that the +// tx is not a blob tx or a deposit tx. 
+func ValidateAndUnmarshalSequencerTx( + height uint64, + tx *sequencerblockv1.RollupData, + bridgeAddresses map[string]*params.AstriaBridgeAddressConfig, + bridgeAllowedAssets map[string]struct{}, +) (*types.Transaction, error) { + if deposit := tx.GetDeposit(); deposit != nil { + bridgeAddress := deposit.BridgeAddress.GetBech32M() + bac, ok := bridgeAddresses[bridgeAddress] + if !ok { + return nil, fmt.Errorf("unknown bridge address: %s", bridgeAddress) + } + + if height < uint64(bac.StartHeight) { + return nil, fmt.Errorf("bridging asset %s from bridge %s not allowed before height %d", bac.AssetDenom, bridgeAddress, bac.StartHeight) + } + + if _, ok := bridgeAllowedAssets[deposit.Asset]; !ok { + return nil, fmt.Errorf("disallowed asset %s in deposit tx", deposit.Asset) + } + + if deposit.Asset != bac.AssetDenom { + return nil, fmt.Errorf("asset %s does not match bridge address %s asset", deposit.Asset, bridgeAddress) + } + + recipient := common.HexToAddress(deposit.DestinationChainAddress) + amount := bac.ScaledDepositAmount(protoU128ToBigInt(deposit.Amount)) + + if bac.Erc20Asset != nil { + log.Debug("creating deposit tx to mint ERC20 asset", "token", bac.AssetDenom, "erc20Address", bac.Erc20Asset.ContractAddress) + abi, err := contracts.AstriaBridgeableERC20MetaData.GetAbi() + if err != nil { + // this should never happen, as the abi is hardcoded in the contract bindings + return nil, fmt.Errorf("failed to get abi for erc20 contract for asset %s: %w", bac.AssetDenom, err) + } + + // pack arguments for calling the `mint` function on the ERC20 contract + args := []interface{}{recipient, amount} + calldata, err := abi.Pack("mint", args...) + if err != nil { + return nil, err + } + + txdata := types.DepositTx{ + From: bac.SenderAddress, + Value: new(big.Int), // don't need to set this, as we aren't minting the native asset + // mints cost ~14k gas, however this can vary based on existing storage, so we add a little extra as buffer. + // + // the fees are spent from the "bridge account" which is not actually a real account, but is instead some + // address defined by consensus, so the gas cost is not actually deducted from any account. + Gas: 64000, + To: &bac.Erc20Asset.ContractAddress, + Data: calldata, + SourceTransactionId: *deposit.SourceTransactionId, + SourceTransactionIndex: deposit.SourceActionIndex, + } + + tx := types.NewTx(&txdata) + return tx, nil + } + + txdata := types.DepositTx{ + From: bac.SenderAddress, + To: &recipient, + Value: amount, + Gas: 0, + SourceTransactionId: *deposit.SourceTransactionId, + SourceTransactionIndex: deposit.SourceActionIndex, + } + return types.NewTx(&txdata), nil + } else { + ethTx := new(types.Transaction) + err := ethTx.UnmarshalBinary(tx.GetSequencedData()) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal sequenced data into transaction: %w. tx hash: %s", err, sha256.Sum256(tx.GetSequencedData())) + } + + if ethTx.Type() == types.DepositTxType { + return nil, fmt.Errorf("deposit tx not allowed in sequenced data. tx hash: %s", sha256.Sum256(tx.GetSequencedData())) + } + + if ethTx.Type() == types.BlobTxType { + return nil, fmt.Errorf("blob tx not allowed in sequenced data. 
tx hash: %s", sha256.Sum256(tx.GetSequencedData())) + } + + return ethTx, nil + } +} diff --git a/grpc/execution/validation_test.go b/grpc/shared/validation_test.go similarity index 90% rename from grpc/execution/validation_test.go rename to grpc/shared/validation_test.go index 9c2b149d6..e27db7aab 100644 --- a/grpc/execution/validation_test.go +++ b/grpc/shared/validation_test.go @@ -1,4 +1,4 @@ -package execution +package shared import ( "math/big" @@ -15,10 +15,16 @@ import ( "github.com/stretchr/testify/require" ) +func bigIntToProtoU128(i *big.Int) *primitivev1.Uint128 { + lo := i.Uint64() + hi := new(big.Int).Rsh(i, 64).Uint64() + return &primitivev1.Uint128{Lo: lo, Hi: hi} +} + func testBlobTx() *types.Transaction { return types.NewTx(&types.BlobTx{ Nonce: 1, - To: testAddr, + To: TestAddr, Value: uint256.NewInt(1000), Gas: 1000, Data: []byte("data"), @@ -27,7 +33,7 @@ func testBlobTx() *types.Transaction { func testDepositTx() *types.Transaction { return types.NewTx(&types.DepositTx{ - From: testAddr, + From: TestAddr, Value: big.NewInt(1000), Gas: 1000, }) @@ -53,7 +59,7 @@ func generateBech32MAddress() string { } func TestSequenceTxValidation(t *testing.T) { - ethservice, serviceV1Alpha1 := setupExecutionService(t, 10) + ethservice, serviceV1Alpha1 := SetupSharedService(t, 10) blobTx, err := testBlobTx().MarshalBinary() require.Nil(t, err, "failed to marshal random blob tx: %v", err) @@ -62,7 +68,7 @@ func TestSequenceTxValidation(t *testing.T) { require.Nil(t, err, "failed to marshal random deposit tx: %v", err) unsignedTx := types.NewTransaction(uint64(0), common.HexToAddress("0x9a9070028361F7AAbeB3f2F2Dc07F82C4a98A02a"), big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil) - tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), testKey) + tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), TestKey) require.Nil(t, err, "failed to sign tx: %v", err) validMarshalledTx, err := tx.MarshalBinary() @@ -77,7 +83,7 @@ func TestSequenceTxValidation(t *testing.T) { invalidHeightBridgeAssetDenom := "invalid-height-asset-denom" invalidHeightBridgeAddressBech32m := generateBech32MAddress() - serviceV1Alpha1.bridgeAddresses[invalidHeightBridgeAddressBech32m] = ¶ms.AstriaBridgeAddressConfig{ + serviceV1Alpha1.BridgeAddresses()[invalidHeightBridgeAddressBech32m] = ¶ms.AstriaBridgeAddressConfig{ AssetDenom: invalidHeightBridgeAssetDenom, StartHeight: 100, } @@ -196,7 +202,11 @@ func TestSequenceTxValidation(t *testing.T) { for _, test := range tests { t.Run(test.description, func(t *testing.T) { +<<<<<<< HEAD:grpc/execution/validation_test.go _, err := validateAndUnmarshalSequencerTx(2, test.sequencerTx, serviceV1Alpha1.bridgeAddresses, serviceV1Alpha1.bridgeAllowedAssets) +======= + _, err := ValidateAndUnmarshalSequencerTx(2, test.sequencerTx, serviceV1Alpha1.BridgeAddresses(), serviceV1Alpha1.BridgeAllowedAssets(), common.Address{}) +>>>>>>> 21f5aa7f7 (separate out execution api services and optimistic execution api services):grpc/shared/validation_test.go if test.wantErr == "" && err == nil { return } diff --git a/node/grpcstack.go b/node/grpcstack.go index 5619efde7..9b4d3ffeb 100644 --- a/node/grpcstack.go +++ b/node/grpcstack.go @@ -1,6 +1,7 @@ package node import ( + optimisticGrpc "buf.build/gen/go/astria/execution-apis/grpc/go/astria/bundle/v1alpha1/bundlev1alpha1grpc" "net" "os" "sync" @@ -17,26 +18,34 @@ type GRPCServerHandler struct { tcpEndpoint string udsEndpoint string - server 
*grpc.Server + execServer *grpc.Server + optimisticServer *grpc.Server executionServiceServerV1a2 *astriaGrpc.ExecutionServiceServer + optimisticExecServ *optimisticGrpc.OptimisticExecutionServiceServer + streamBundleServ *optimisticGrpc.BundleServiceServer } // NewServer creates a new gRPC server. // It registers the execution service server. // It registers the gRPC server with the node so it can be stopped on shutdown. -func NewGRPCServerHandler(node *Node, execServ astriaGrpc.ExecutionServiceServer, cfg *Config) error { - server := grpc.NewServer() +func NewGRPCServerHandler(node *Node, execServ astriaGrpc.ExecutionServiceServer, optimisticExecServ optimisticGrpc.OptimisticExecutionServiceServer, streamBundleServ optimisticGrpc.BundleServiceServer, cfg *Config) error { + execServer, optimisticServer := grpc.NewServer(), grpc.NewServer() log.Info("gRPC server enabled", "tcpEndpoint", cfg.GRPCTcpEndpoint(), "udsEndpoint", cfg.GRPCUdsEndpoint()) serverHandler := &GRPCServerHandler{ tcpEndpoint: cfg.GRPCTcpEndpoint(), udsEndpoint: cfg.GRPCUdsEndpoint(), - server: server, + execServer: execServer, + optimisticServer: optimisticServer, executionServiceServerV1a2: &execServ, + optimisticExecServ: &optimisticExecServ, + streamBundleServ: &streamBundleServ, } - astriaGrpc.RegisterExecutionServiceServer(server, execServ) + astriaGrpc.RegisterExecutionServiceServer(execServer, execServ) + optimisticGrpc.RegisterOptimisticExecutionServiceServer(optimisticServer, optimisticExecServ) + optimisticGrpc.RegisterBundleServiceServer(optimisticServer, streamBundleServ) node.RegisterGRPCServer(serverHandler) return nil @@ -69,8 +78,8 @@ func (handler *GRPCServerHandler) Start() error { return err } - go handler.server.Serve(tcpLis) - go handler.server.Serve(udsLis) + go handler.execServer.Serve(tcpLis) + go handler.optimisticServer.Serve(udsLis) log.Info("gRPC server started", "tcpEndpoint", handler.tcpEndpoint, "udsEndpoint", handler.udsEndpoint) return nil } @@ -80,7 +89,8 @@ func (handler *GRPCServerHandler) Stop() error { handler.mu.Lock() defer handler.mu.Unlock() - handler.server.GracefulStop() + handler.execServer.GracefulStop() + handler.optimisticServer.GracefulStop() log.Info("gRPC server stopped", "tcpEndpoint", handler.tcpEndpoint, "udsEndpoint", handler.udsEndpoint) return nil } From 2f6e1eba4d7e5c08acabd02215a2e5bd706e11bd Mon Sep 17 00:00:00 2001 From: Bharath Date: Thu, 17 Oct 2024 21:04:50 +0530 Subject: [PATCH 38/79] minor updates --- grpc/optimistic/server.go | 8 ++++---- grpc/shared/container.go | 1 + 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/grpc/optimistic/server.go b/grpc/optimistic/server.go index d48a5ed1e..db3327111 100644 --- a/grpc/optimistic/server.go +++ b/grpc/optimistic/server.go @@ -97,9 +97,9 @@ func (o *OptimisticServiceV1Alpha1) StreamBundles(_ *optimsticPb.StreamBundlesRe } } -func (s *OptimisticServiceV1Alpha1) StreamExecuteOptimisticBlock(stream optimisticGrpc.OptimisticExecutionService_StreamExecuteOptimisticBlockServer) error { +func (o *OptimisticServiceV1Alpha1) StreamExecuteOptimisticBlock(stream optimisticGrpc.OptimisticExecutionService_StreamExecuteOptimisticBlockServer) error { mempoolClearingEventCh := make(chan core.NewMempoolCleared) - mempoolClearingEvent := s.Eth().TxPool().SubscribeMempoolClearance(mempoolClearingEventCh) + mempoolClearingEvent := o.Eth().TxPool().SubscribeMempoolClearance(mempoolClearingEventCh) defer mempoolClearingEvent.Unsubscribe() for { @@ -115,7 +115,7 @@ func (s *OptimisticServiceV1Alpha1) 
StreamExecuteOptimisticBlock(stream optimist baseBlock := msg.GetBlock() // execute the optimistic block and wait for the mempool clearing event - optimisticBlock, err := s.ExecuteOptimisticBlock(stream.Context(), baseBlock) + optimisticBlock, err := o.ExecuteOptimisticBlock(stream.Context(), baseBlock) if err != nil { return status.Error(codes.Internal, "failed to execute optimistic block") } @@ -127,7 +127,7 @@ func (s *OptimisticServiceV1Alpha1) StreamExecuteOptimisticBlock(stream optimist if event.NewHead.Hash() != optimisticBlockHash { return status.Error(codes.Internal, "failed to clear mempool after optimistic block execution") } - s.currentOptimisticSequencerBlock.Store(&baseBlock.SequencerBlockHash) + o.currentOptimisticSequencerBlock.Store(&baseBlock.SequencerBlockHash) err = stream.Send(&optimsticPb.StreamExecuteOptimisticBlockResponse{ Block: optimisticBlock, BaseSequencerBlockHash: baseBlock.SequencerBlockHash, diff --git a/grpc/shared/container.go b/grpc/shared/container.go index 253e1b3c1..da11722db 100644 --- a/grpc/shared/container.go +++ b/grpc/shared/container.go @@ -149,6 +149,7 @@ func (s *SharedServiceContainer) NextFeeRecipient() common.Address { return s.nextFeeRecipient } +// assumes that the block execution lock is being held func (s *SharedServiceContainer) SetNextFeeRecipient(nextFeeRecipient common.Address) { s.nextFeeRecipient = nextFeeRecipient } From df7504f6fafdd98e235776eab351dd7e23a122cb Mon Sep 17 00:00:00 2001 From: Bharath Date: Thu, 17 Oct 2024 21:19:18 +0530 Subject: [PATCH 39/79] minor nits --- grpc/execution/test_setup.go | 1 + grpc/optimistic/server.go | 28 ++++++++++++++-------------- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/grpc/execution/test_setup.go b/grpc/execution/test_setup.go index 08f0413f4..de25f5299 100644 --- a/grpc/execution/test_setup.go +++ b/grpc/execution/test_setup.go @@ -7,5 +7,6 @@ import ( func SetupExecutionService(t *testing.T, sharedService *shared.SharedServiceContainer) *ExecutionServiceServerV1 { t.Helper() + return NewExecutionServiceServerV1Alpha2(sharedService) } diff --git a/grpc/optimistic/server.go b/grpc/optimistic/server.go index db3327111..554a2293c 100644 --- a/grpc/optimistic/server.go +++ b/grpc/optimistic/server.go @@ -140,7 +140,7 @@ func (o *OptimisticServiceV1Alpha1) StreamExecuteOptimisticBlock(stream optimist } } -func (s *OptimisticServiceV1Alpha1) ExecuteOptimisticBlock(ctx context.Context, req *optimsticPb.BaseBlock) (*astriaPb.Block, error) { +func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlock(ctx context.Context, req *optimsticPb.BaseBlock) (*astriaPb.Block, error) { // we need to execute the optimistic block log.Debug("ExecuteOptimisticBlock called", "timestamp", req.Timestamp, "sequencer_block_hash", req.SequencerBlockHash) executeOptimisticBlockRequestCount.Inc(1) @@ -150,7 +150,7 @@ func (s *OptimisticServiceV1Alpha1) ExecuteOptimisticBlock(ctx context.Context, return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("BaseBlock is invalid: %s", err.Error())) } - if !s.SyncMethodsCalled() { + if !o.SyncMethodsCalled() { return nil, status.Error(codes.PermissionDenied, "Cannot execute block until GetGenesisInfo && GetCommitmentState methods are called") } @@ -158,27 +158,27 @@ func (s *OptimisticServiceV1Alpha1) ExecuteOptimisticBlock(ctx context.Context, executionStart := time.Now() defer executionOptimisticBlockTimer.UpdateSince(executionStart) - s.CommitmentUpdateLock().Lock() + o.CommitmentUpdateLock().Lock() // get the soft block - softBlock := 
s.Bc().CurrentSafeBlock() - s.CommitmentUpdateLock().Unlock() + softBlock := o.Bc().CurrentSafeBlock() + o.CommitmentUpdateLock().Unlock() - s.BlockExecutionLock().Lock() - nextFeeRecipient := s.NextFeeRecipient() - s.BlockExecutionLock().Unlock() + o.BlockExecutionLock().Lock() + nextFeeRecipient := o.NextFeeRecipient() + o.BlockExecutionLock().Unlock() // the height that this block will be at - height := s.Bc().CurrentBlock().Number.Uint64() + 1 + height := o.Bc().CurrentBlock().Number.Uint64() + 1 txsToProcess := types.Transactions{} for _, tx := range req.Transactions { - unmarshalledTx, err := shared.ValidateAndUnmarshalSequencerTx(height, tx, s.BridgeAddresses(), s.BridgeAllowedAssets(), s.BridgeSenderAddress()) + unmarshalledTx, err := shared.ValidateAndUnmarshalSequencerTx(height, tx, o.BridgeAddresses(), o.BridgeAllowedAssets(), o.BridgeSenderAddress()) if err != nil { log.Debug("failed to validate sequencer tx, ignoring", "tx", tx, "err", err) continue } - err = s.Eth().TxPool().ValidateTx(unmarshalledTx) + err = o.Eth().TxPool().ValidateTx(unmarshalledTx) if err != nil { log.Debug("failed to validate tx, ignoring", "tx", tx, "err", err) continue @@ -196,7 +196,7 @@ func (s *OptimisticServiceV1Alpha1) ExecuteOptimisticBlock(ctx context.Context, OverrideTransactions: txsToProcess, IsOptimisticExecution: true, } - payload, err := s.Eth().Miner().BuildPayload(payloadAttributes) + payload, err := o.Eth().Miner().BuildPayload(payloadAttributes) if err != nil { log.Error("failed to build payload", "err", err) return nil, status.Error(codes.InvalidArgument, "Could not build block with provided txs") @@ -210,7 +210,7 @@ func (s *OptimisticServiceV1Alpha1) ExecuteOptimisticBlock(ctx context.Context, // this will insert the optimistic block into the chain and persist it's state without // setting it as the HEAD. 
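SetOptimistic, called just below, only needs to publish a pointer to the optimistic head so that readers such as CurrentOptimisticBlock can pick it up. A minimal sketch of that idea, assuming an atomic pointer and a stand-in header type (the actual core implementation is not part of this patch and may differ):

package main

import (
	"fmt"
	"sync/atomic"
)

// header is a stand-in for types.Header.
type header struct{ Number uint64 }

// chain sketches tracking an optimistic head next to the canonical head:
// SetOptimistic stores a pointer, CurrentOptimisticBlock loads it.
type chain struct {
	optimisticHead atomic.Pointer[header]
}

func (c *chain) SetOptimistic(h *header)         { c.optimisticHead.Store(h) }
func (c *chain) CurrentOptimisticBlock() *header { return c.optimisticHead.Load() }

func main() {
	c := &chain{}
	c.SetOptimistic(&header{Number: 11})
	fmt.Println(c.CurrentOptimisticBlock().Number) // 11
}
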
- err = s.Bc().InsertBlockWithoutSetHead(block) + err = o.Bc().InsertBlockWithoutSetHead(block) if err != nil { log.Error("failed to insert block to chain", "hash", block.Hash(), "prevHash", block.ParentHash(), "err", err) return nil, status.Error(codes.Internal, "failed to insert block to chain") @@ -218,7 +218,7 @@ func (s *OptimisticServiceV1Alpha1) ExecuteOptimisticBlock(ctx context.Context, // we store a pointer to the optimistic block in the chain so that we can use it // to retrieve the state of the optimistic block - s.Bc().SetOptimistic(block) + o.Bc().SetOptimistic(block) res := &astriaPb.Block{ Number: uint32(block.NumberU64()), From 128be5760e184ff88267a7f3a158b744d29c7738 Mon Sep 17 00:00:00 2001 From: Bharath Date: Thu, 17 Oct 2024 22:22:38 +0530 Subject: [PATCH 40/79] make tests more robust --- grpc/optimistic/server_test.go | 39 ++++++++++++++++++++++++++-------- 1 file changed, 30 insertions(+), 9 deletions(-) diff --git a/grpc/optimistic/server_test.go b/grpc/optimistic/server_test.go index 66a279590..28b0a2425 100644 --- a/grpc/optimistic/server_test.go +++ b/grpc/optimistic/server_test.go @@ -9,7 +9,6 @@ import ( "context" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/txpool" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/grpc/execution" @@ -240,6 +239,15 @@ func TestNewExecutionServiceServerV1Alpha2_StreamBundles(t *testing.T) { }) } + txErrors := ethservice.TxPool().Add(txs, true, false) + for _, txErr := range txErrors { + require.Nil(t, txErr, "Failed to add tx to mempool") + } + + pending, queued := ethservice.TxPool().Stats() + require.Equal(t, pending, 5, "Mempool should have 5 pending txs") + require.Equal(t, queued, 0, "Mempool should have 0 queued txs") + req := optimsticPb.StreamExecuteOptimisticBlockRequest{Block: &optimsticPb.BaseBlock{ SequencerBlockHash: sequencerBlockHash, Transactions: marshalledTxs, @@ -280,10 +288,9 @@ func TestNewExecutionServiceServerV1Alpha2_StreamBundles(t *testing.T) { astriaOrdered := ethservice.TxPool().AstriaOrdered() require.Equal(t, 0, astriaOrdered.Len(), "AstriaOrdered should be empty") - pendingTxs := ethservice.TxPool().Pending(txpool.PendingFilter{ - OnlyPlainTxs: true, - }) - require.Equal(t, len(pendingTxs), 0, "Mempool should be empty") + pending, queued = ethservice.TxPool().Stats() + require.Equal(t, pending, 0, "Mempool should have 0 pending txs") + require.Equal(t, queued, 0, "Mempool should have 0 queued txs") mockServerSideStreaming := MockServerSideStreaming[optimsticPb.Bundle]{ sentResponses: []*optimsticPb.Bundle{}, @@ -294,11 +301,16 @@ func TestNewExecutionServiceServerV1Alpha2_StreamBundles(t *testing.T) { errorCh <- optimisticServiceV1Alpha1.StreamBundles(&optimsticPb.StreamBundlesRequest{}, &mockServerSideStreaming) }() + stateDb, err := ethservice.BlockChain().StateAt(currentOptimisticBlock.Root) + require.Nil(t, err, "Failed to get state db") + + latestNonce := stateDb.GetNonce(shared.TestAddr) + // optimistic block is created, we can now add txs and check if they get streamed // create 5 txs txs = []*types.Transaction{} - for i := 5; i < 10; i++ { - unsignedTx := types.NewTransaction(uint64(i), shared.TestToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil) + for i := 0; i < 5; i++ { + unsignedTx := types.NewTransaction(latestNonce+uint64(i), shared.TestToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), 
nil) tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), shared.TestKey) require.Nil(t, err, "Failed to sign tx") txs = append(txs, tx) @@ -310,12 +322,12 @@ func TestNewExecutionServiceServerV1Alpha2_StreamBundles(t *testing.T) { }) } - txErrors := ethservice.TxPool().Add(txs, true, false) + txErrors = ethservice.TxPool().Add(txs, true, false) for _, txErr := range txErrors { require.Nil(t, txErr, "Failed to add tx to mempool") } - pending, queued := ethservice.TxPool().Stats() + pending, queued = ethservice.TxPool().Stats() require.Equal(t, pending, 5, "Mempool should have 5 pending txs") require.Equal(t, queued, 0, "Mempool should have 0 queued txs") @@ -333,8 +345,17 @@ func TestNewExecutionServiceServerV1Alpha2_StreamBundles(t *testing.T) { require.Len(t, mockServerSideStreaming.sentResponses, 5, "Number of responses should match the number of requests") + txIndx := 0 for _, resp := range mockServerSideStreaming.sentResponses { require.Len(t, resp.Transactions, 1, "Bundle should have 1 tx") + + receivedTx := resp.Transactions[0] + sentTx := txs[txIndx] + marshalledSentTx, err := sentTx.MarshalBinary() + require.Nil(t, err, "Failed to marshal tx") + require.True(t, bytes.Equal(receivedTx, marshalledSentTx), "Received tx does not match sent tx") + txIndx += 1 + require.True(t, bytes.Equal(resp.PrevRollupBlockHash, currentOptimisticBlock.Hash().Bytes()), "PrevRollupBlockHash should match the current optimistic block hash") require.True(t, bytes.Equal(resp.BaseSequencerBlockHash, *optimisticServiceV1Alpha1.currentOptimisticSequencerBlock.Load()), "BaseSequencerBlockHash should match the current optimistic sequencer block hash") } From 99560facfb1dc2a377ad7cbaeeee48c3037823e0 Mon Sep 17 00:00:00 2001 From: Bharath Date: Thu, 24 Oct 2024 21:13:13 +0530 Subject: [PATCH 41/79] move BigIntoToProtoU128 to shared test utils --- grpc/execution/server_test.go | 10 ++-------- grpc/optimistic/server_test.go | 8 +------- grpc/shared/test_utils.go | 7 +++++++ 3 files changed, 10 insertions(+), 15 deletions(-) diff --git a/grpc/execution/server_test.go b/grpc/execution/server_test.go index e388fe321..fc5128229 100644 --- a/grpc/execution/server_test.go +++ b/grpc/execution/server_test.go @@ -195,12 +195,6 @@ func TestExecutionServiceServerV1Alpha2_BatchGetBlocks(t *testing.T) { } } -func bigIntToProtoU128(i *big.Int) *primitivev1.Uint128 { - lo := i.Uint64() - hi := new(big.Int).Rsh(i, 64).Uint64() - return &primitivev1.Uint128{Lo: lo, Hi: hi} -} - func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) { ethservice, _ := shared.SetupSharedService(t, 10) @@ -290,7 +284,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) { // create deposit tx if depositTxAmount is non zero if tt.depositTxAmount.Cmp(big.NewInt(0)) != 0 { - depositAmount := bigIntToProtoU128(tt.depositTxAmount) + depositAmount := shared.BigIntToProtoU128(tt.depositTxAmount) bridgeAddress := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].BridgeAddress bridgeAssetDenom := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].AssetDenom @@ -382,7 +376,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitment(t *testi } amountToDeposit := big.NewInt(1000000000000000000) - depositAmount := bigIntToProtoU128(amountToDeposit) + depositAmount := shared.BigIntToProtoU128(amountToDeposit) bridgeAddress := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].BridgeAddress bridgeAssetDenom := 
ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].AssetDenom diff --git a/grpc/optimistic/server_test.go b/grpc/optimistic/server_test.go index 28b0a2425..1eba034c4 100644 --- a/grpc/optimistic/server_test.go +++ b/grpc/optimistic/server_test.go @@ -23,12 +23,6 @@ import ( "time" ) -func bigIntToProtoU128(i *big.Int) *primitivev1.Uint128 { - lo := i.Uint64() - hi := new(big.Int).Rsh(i, 64).Uint64() - return &primitivev1.Uint128{Lo: lo, Hi: hi} -} - func TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlock(t *testing.T) { ethService, _ := shared.SetupSharedService(t, 10) @@ -111,7 +105,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlock(t *testing.T) { // create deposit tx if depositTxAmount is non zero if tt.depositTxAmount.Cmp(big.NewInt(0)) != 0 { - depositAmount := bigIntToProtoU128(tt.depositTxAmount) + depositAmount := shared.BigIntToProtoU128(tt.depositTxAmount) bridgeAddress := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].BridgeAddress bridgeAssetDenom := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].AssetDenom diff --git a/grpc/shared/test_utils.go b/grpc/shared/test_utils.go index 9782f9832..82033ea5c 100644 --- a/grpc/shared/test_utils.go +++ b/grpc/shared/test_utils.go @@ -1,6 +1,7 @@ package shared import ( + primitivev1 "buf.build/gen/go/astria/primitives/protocolbuffers/go/astria/primitive/v1" "crypto/ecdsa" "math/big" "testing" @@ -129,3 +130,9 @@ func StartEthService(t *testing.T, genesis *core.Genesis) *eth.Ethereum { ethservice.SetSynced() return ethservice } + +func BigIntToProtoU128(i *big.Int) *primitivev1.Uint128 { + lo := i.Uint64() + hi := new(big.Int).Rsh(i, 64).Uint64() + return &primitivev1.Uint128{Lo: lo, Hi: hi} +} From a62047e1f66a966ce76bb135446179cd15b35e1d Mon Sep 17 00:00:00 2001 From: Bharath Date: Tue, 29 Oct 2024 16:04:01 +0530 Subject: [PATCH 42/79] minor updates --- cmd/geth/config.go | 10 +----- grpc/execution/server.go | 6 +--- grpc/execution/test_setup.go | 2 +- grpc/execution/validation.go | 2 +- grpc/optimistic/server.go | 18 ++++------ grpc/optimistic/server_test.go | 60 ++++++++++++++++++---------------- grpc/shared/container.go | 12 ++----- grpc/shared/validation_test.go | 6 +--- 8 files changed, 46 insertions(+), 70 deletions(-) diff --git a/cmd/geth/config.go b/cmd/geth/config.go index a9eb24653..c59520ae6 100644 --- a/cmd/geth/config.go +++ b/cmd/geth/config.go @@ -208,24 +208,16 @@ func makeFullNode(ctx *cli.Context) *node.Node { // Configure gRPC if requested. if ctx.IsSet(utils.GRPCEnabledFlag.Name) { -<<<<<<< HEAD - serviceV1, err := execution.NewExecutionServiceServerV1(eth) -======= sharedService, err := shared.NewSharedServiceContainer(eth) ->>>>>>> 21f5aa7f7 (separate out execution api services and optimistic execution api services) if err != nil { utils.Fatalf("failed to create shared service container: %v", err) } -<<<<<<< HEAD - utils.RegisterGRPCExecutionService(stack, serviceV1, &cfg.Node) -======= - serviceV1a2 := execution.NewExecutionServiceServerV1Alpha2(sharedService) + serviceV1a2 := execution.NewExecutionServiceServerV1(sharedService) optimisticServiceV1a1 := optimistic.NewOptimisticServiceV1Alpha(sharedService) utils.RegisterGRPCServices(stack, serviceV1a2, optimisticServiceV1a1, optimisticServiceV1a1, &cfg.Node) ->>>>>>> 21f5aa7f7 (separate out execution api services and optimistic execution api services) } // Add the Ethereum Stats daemon if requested. 
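BigIntToProtoU128, now shared in grpc/shared/test_utils.go, is the inverse of protoU128ToBigInt: the value is split into a low and a high 64-bit word. A self-contained round-trip sketch with a local stand-in for primitivev1.Uint128 (like the patch's helper, it relies on big.Int.Uint64 returning the low word for values wider than 64 bits):

package main

import (
	"fmt"
	"math/big"
)

// uint128 is a local stand-in for primitivev1.Uint128.
type uint128 struct{ Lo, Hi uint64 }

// toU128 mirrors BigIntToProtoU128: Lo takes the low 64 bits, Hi the next 64.
func toU128(i *big.Int) uint128 {
	return uint128{Lo: i.Uint64(), Hi: new(big.Int).Rsh(i, 64).Uint64()}
}

// fromU128 mirrors protoU128ToBigInt: Hi shifted left by 64 bits, plus Lo.
func fromU128(u uint128) *big.Int {
	hi := new(big.Int).Lsh(new(big.Int).SetUint64(u.Hi), 64)
	return hi.Add(hi, new(big.Int).SetUint64(u.Lo))
}

func main() {
	v, _ := new(big.Int).SetString("1000000000000000000000", 10) // wider than 64 bits
	fmt.Println(fromU128(toU128(v)).Cmp(v) == 0)                 // true
}
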
diff --git a/grpc/execution/server.go b/grpc/execution/server.go index 879db740e..3f4d5c5be 100644 --- a/grpc/execution/server.go +++ b/grpc/execution/server.go @@ -61,7 +61,7 @@ var ( commitmentStateUpdateTimer = metrics.GetOrRegisterTimer("astria/execution/commitment", nil) ) -func NewExecutionServiceServerV1Alpha2(sharedServiceContainer *shared.SharedServiceContainer) *ExecutionServiceServerV1 { +func NewExecutionServiceServerV1(sharedServiceContainer *shared.SharedServiceContainer) *ExecutionServiceServerV1 { execServiceServerV1Alpha2 := &ExecutionServiceServerV1{ sharedServiceContainer: sharedServiceContainer, } @@ -435,10 +435,6 @@ func (s *ExecutionServiceServerV1) BridgeAllowedAssets() map[string]struct{} { return s.sharedServiceContainer.BridgeAllowedAssets() } -func (s *ExecutionServiceServerV1) BridgeSenderAddress() common.Address { - return s.sharedServiceContainer.BridgeSenderAddress() -} - func (s *ExecutionServiceServerV1) SyncMethodsCalled() bool { return s.sharedServiceContainer.SyncMethodsCalled() } diff --git a/grpc/execution/test_setup.go b/grpc/execution/test_setup.go index de25f5299..fafa48ed7 100644 --- a/grpc/execution/test_setup.go +++ b/grpc/execution/test_setup.go @@ -8,5 +8,5 @@ import ( func SetupExecutionService(t *testing.T, sharedService *shared.SharedServiceContainer) *ExecutionServiceServerV1 { t.Helper() - return NewExecutionServiceServerV1Alpha2(sharedService) + return NewExecutionServiceServerV1(sharedService) } diff --git a/grpc/execution/validation.go b/grpc/execution/validation.go index be4f675ad..5a5cc9957 100644 --- a/grpc/execution/validation.go +++ b/grpc/execution/validation.go @@ -1,7 +1,7 @@ package execution import ( - astriaPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/execution/v1alpha2" + astriaPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/execution/v1" "fmt" ) diff --git a/grpc/optimistic/server.go b/grpc/optimistic/server.go index 554a2293c..3d3f57ba2 100644 --- a/grpc/optimistic/server.go +++ b/grpc/optimistic/server.go @@ -3,7 +3,7 @@ package optimistic import ( optimisticGrpc "buf.build/gen/go/astria/execution-apis/grpc/go/astria/bundle/v1alpha1/bundlev1alpha1grpc" optimsticPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/bundle/v1alpha1" - astriaPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/execution/v1alpha2" + astriaPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/execution/v1" "context" "errors" "fmt" @@ -54,7 +54,7 @@ func NewOptimisticServiceV1Alpha(sharedServiceContainer *shared.SharedServiceCon return optimisticService } -func (o *OptimisticServiceV1Alpha1) StreamBundles(_ *optimsticPb.StreamBundlesRequest, stream optimisticGrpc.BundleService_StreamBundlesServer) error { +func (o *OptimisticServiceV1Alpha1) StreamBundles(_ *optimsticPb.GetBundleStreamRequest, stream optimisticGrpc.BundleService_GetBundleStreamServer) error { pendingTxEventCh := make(chan core.NewTxsEvent) pendingTxEvent := o.Eth().TxPool().SubscribeTransactions(pendingTxEventCh, false) defer pendingTxEvent.Unsubscribe() @@ -85,7 +85,7 @@ func (o *OptimisticServiceV1Alpha1) StreamBundles(_ *optimsticPb.StreamBundlesRe bundle.BaseSequencerBlockHash = *o.currentOptimisticSequencerBlock.Load() bundle.PrevRollupBlockHash = optimisticBlock.Hash().Bytes() - err = stream.Send(&bundle) + err = stream.Send(&optimsticPb.GetBundleStreamResponse{Bundle: &bundle}) if err != nil { return status.Errorf(codes.Internal, "error sending bundle over stream: %v", err) } @@ 
-97,7 +97,7 @@ func (o *OptimisticServiceV1Alpha1) StreamBundles(_ *optimsticPb.StreamBundlesRe } } -func (o *OptimisticServiceV1Alpha1) StreamExecuteOptimisticBlock(stream optimisticGrpc.OptimisticExecutionService_StreamExecuteOptimisticBlockServer) error { +func (o *OptimisticServiceV1Alpha1) StreamExecuteOptimisticBlock(stream optimisticGrpc.OptimisticExecutionService_ExecuteOptimisticBlockStreamServer) error { mempoolClearingEventCh := make(chan core.NewMempoolCleared) mempoolClearingEvent := o.Eth().TxPool().SubscribeMempoolClearance(mempoolClearingEventCh) defer mempoolClearingEvent.Unsubscribe() @@ -112,7 +112,7 @@ func (o *OptimisticServiceV1Alpha1) StreamExecuteOptimisticBlock(stream optimist return err } - baseBlock := msg.GetBlock() + baseBlock := msg.GetBaseBlock() // execute the optimistic block and wait for the mempool clearing event optimisticBlock, err := o.ExecuteOptimisticBlock(stream.Context(), baseBlock) @@ -128,7 +128,7 @@ func (o *OptimisticServiceV1Alpha1) StreamExecuteOptimisticBlock(stream optimist return status.Error(codes.Internal, "failed to clear mempool after optimistic block execution") } o.currentOptimisticSequencerBlock.Store(&baseBlock.SequencerBlockHash) - err = stream.Send(&optimsticPb.StreamExecuteOptimisticBlockResponse{ + err = stream.Send(&optimsticPb.ExecuteOptimisticBlockStreamResponse{ Block: optimisticBlock, BaseSequencerBlockHash: baseBlock.SequencerBlockHash, }) @@ -172,7 +172,7 @@ func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlock(ctx context.Context, txsToProcess := types.Transactions{} for _, tx := range req.Transactions { - unmarshalledTx, err := shared.ValidateAndUnmarshalSequencerTx(height, tx, o.BridgeAddresses(), o.BridgeAllowedAssets(), o.BridgeSenderAddress()) + unmarshalledTx, err := shared.ValidateAndUnmarshalSequencerTx(height, tx, o.BridgeAddresses(), o.BridgeAllowedAssets()) if err != nil { log.Debug("failed to validate sequencer tx, ignoring", "tx", tx, "err", err) continue @@ -283,10 +283,6 @@ func (s *OptimisticServiceV1Alpha1) BridgeAllowedAssets() map[string]struct{} { return s.sharedServiceContainer.BridgeAllowedAssets() } -func (s *OptimisticServiceV1Alpha1) BridgeSenderAddress() common.Address { - return s.sharedServiceContainer.BridgeSenderAddress() -} - func (s *OptimisticServiceV1Alpha1) SyncMethodsCalled() bool { return s.sharedServiceContainer.SyncMethodsCalled() } diff --git a/grpc/optimistic/server_test.go b/grpc/optimistic/server_test.go index 1eba034c4..730c4d22b 100644 --- a/grpc/optimistic/server_test.go +++ b/grpc/optimistic/server_test.go @@ -2,9 +2,9 @@ package optimistic import ( optimsticPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/bundle/v1alpha1" - astriaPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/execution/v1alpha2" + astriaPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/execution/v1" primitivev1 "buf.build/gen/go/astria/primitives/protocolbuffers/go/astria/primitive/v1" - sequencerblockv1alpha1 "buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go/astria/sequencerblock/v1alpha1" + sequencerblockv1 "buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go/astria/sequencerblock/v1" "bytes" "context" "github.com/ethereum/go-ethereum/common" @@ -89,7 +89,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlock(t *testing.T) { // create the txs to send // create 5 txs txs := []*types.Transaction{} - marshalledTxs := []*sequencerblockv1alpha1.RollupData{} + marshalledTxs := 
[]*sequencerblockv1.RollupData{} for i := 0; i < 5; i++ { unsignedTx := types.NewTransaction(uint64(i), shared.TestToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil) tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), shared.TestKey) @@ -98,8 +98,8 @@ func TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlock(t *testing.T) { marshalledTx, err := tx.MarshalBinary() require.Nil(t, err, "Failed to marshal tx") - marshalledTxs = append(marshalledTxs, &sequencerblockv1alpha1.RollupData{ - Value: &sequencerblockv1alpha1.RollupData_SequencedData{SequencedData: marshalledTx}, + marshalledTxs = append(marshalledTxs, &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{SequencedData: marshalledTx}, }) } @@ -115,7 +115,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlock(t *testing.T) { chainDestinationAddress := crypto.PubkeyToAddress(chainDestinationAddressPrivKey.PublicKey) - depositTx := &sequencerblockv1alpha1.RollupData{Value: &sequencerblockv1alpha1.RollupData_Deposit{Deposit: &sequencerblockv1alpha1.Deposit{ + depositTx := &sequencerblockv1.RollupData{Value: &sequencerblockv1.RollupData_Deposit{Deposit: &sequencerblockv1.Deposit{ BridgeAddress: &primitivev1.Address{ Bech32M: bridgeAddress, }, @@ -214,12 +214,12 @@ func TestNewExecutionServiceServerV1Alpha2_StreamBundles(t *testing.T) { require.NotNil(t, previousBlock, "Previous block not found") // create the optimistic block via the StreamExecuteOptimisticBlock rpc - requestStreams := []*optimsticPb.StreamExecuteOptimisticBlockRequest{} + requestStreams := []*optimsticPb.ExecuteOptimisticBlockStreamRequest{} sequencerBlockHash := []byte("sequencer_block_hash") // create 1 stream item with 5 txs txs := []*types.Transaction{} - marshalledTxs := []*sequencerblockv1alpha1.RollupData{} + marshalledTxs := []*sequencerblockv1.RollupData{} for i := 0; i < 5; i++ { unsignedTx := types.NewTransaction(uint64(i), shared.TestToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil) tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), shared.TestKey) @@ -228,8 +228,8 @@ func TestNewExecutionServiceServerV1Alpha2_StreamBundles(t *testing.T) { marshalledTx, err := tx.MarshalBinary() require.Nil(t, err, "Failed to marshal tx") - marshalledTxs = append(marshalledTxs, &sequencerblockv1alpha1.RollupData{ - Value: &sequencerblockv1alpha1.RollupData_SequencedData{SequencedData: marshalledTx}, + marshalledTxs = append(marshalledTxs, &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{SequencedData: marshalledTx}, }) } @@ -242,7 +242,7 @@ func TestNewExecutionServiceServerV1Alpha2_StreamBundles(t *testing.T) { require.Equal(t, pending, 5, "Mempool should have 5 pending txs") require.Equal(t, queued, 0, "Mempool should have 0 queued txs") - req := optimsticPb.StreamExecuteOptimisticBlockRequest{Block: &optimsticPb.BaseBlock{ + req := optimsticPb.ExecuteOptimisticBlockStreamRequest{BaseBlock: &optimsticPb.BaseBlock{ SequencerBlockHash: sequencerBlockHash, Transactions: marshalledTxs, Timestamp: ×tamppb.Timestamp{ @@ -252,9 +252,9 @@ func TestNewExecutionServiceServerV1Alpha2_StreamBundles(t *testing.T) { requestStreams = append(requestStreams, &req) - mockBidirectionalStream := &MockBidirectionalStreaming[optimsticPb.StreamExecuteOptimisticBlockRequest, optimsticPb.StreamExecuteOptimisticBlockResponse]{ + mockBidirectionalStream := 
&MockBidirectionalStreaming[optimsticPb.ExecuteOptimisticBlockStreamRequest, optimsticPb.ExecuteOptimisticBlockStreamResponse]{ requestStream: requestStreams, - accumulatedResponses: []*optimsticPb.StreamExecuteOptimisticBlockResponse{}, + accumulatedResponses: []*optimsticPb.ExecuteOptimisticBlockStreamResponse{}, requestCounter: 0, } @@ -286,13 +286,13 @@ func TestNewExecutionServiceServerV1Alpha2_StreamBundles(t *testing.T) { require.Equal(t, pending, 0, "Mempool should have 0 pending txs") require.Equal(t, queued, 0, "Mempool should have 0 queued txs") - mockServerSideStreaming := MockServerSideStreaming[optimsticPb.Bundle]{ - sentResponses: []*optimsticPb.Bundle{}, + mockServerSideStreaming := MockServerSideStreaming[optimsticPb.GetBundleStreamResponse]{ + sentResponses: []*optimsticPb.GetBundleStreamResponse{}, } errorCh = make(chan error) go func() { - errorCh <- optimisticServiceV1Alpha1.StreamBundles(&optimsticPb.StreamBundlesRequest{}, &mockServerSideStreaming) + errorCh <- optimisticServiceV1Alpha1.StreamBundles(&optimsticPb.GetBundleStreamRequest{}, &mockServerSideStreaming) }() stateDb, err := ethservice.BlockChain().StateAt(currentOptimisticBlock.Root) @@ -311,8 +311,8 @@ func TestNewExecutionServiceServerV1Alpha2_StreamBundles(t *testing.T) { marshalledTx, err := tx.MarshalBinary() require.Nil(t, err, "Failed to marshal tx") - marshalledTxs = append(marshalledTxs, &sequencerblockv1alpha1.RollupData{ - Value: &sequencerblockv1alpha1.RollupData_SequencedData{SequencedData: marshalledTx}, + marshalledTxs = append(marshalledTxs, &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{SequencedData: marshalledTx}, }) } @@ -341,17 +341,19 @@ func TestNewExecutionServiceServerV1Alpha2_StreamBundles(t *testing.T) { txIndx := 0 for _, resp := range mockServerSideStreaming.sentResponses { - require.Len(t, resp.Transactions, 1, "Bundle should have 1 tx") + bundle := resp.GetBundle() - receivedTx := resp.Transactions[0] + require.Len(t, bundle.Transactions, 1, "Bundle should have 1 tx") + + receivedTx := bundle.Transactions[0] sentTx := txs[txIndx] marshalledSentTx, err := sentTx.MarshalBinary() require.Nil(t, err, "Failed to marshal tx") require.True(t, bytes.Equal(receivedTx, marshalledSentTx), "Received tx does not match sent tx") txIndx += 1 - require.True(t, bytes.Equal(resp.PrevRollupBlockHash, currentOptimisticBlock.Hash().Bytes()), "PrevRollupBlockHash should match the current optimistic block hash") - require.True(t, bytes.Equal(resp.BaseSequencerBlockHash, *optimisticServiceV1Alpha1.currentOptimisticSequencerBlock.Load()), "BaseSequencerBlockHash should match the current optimistic sequencer block hash") + require.True(t, bytes.Equal(bundle.PrevRollupBlockHash, currentOptimisticBlock.Hash().Bytes()), "PrevRollupBlockHash should match the current optimistic block hash") + require.True(t, bytes.Equal(bundle.BaseSequencerBlockHash, *optimisticServiceV1Alpha1.currentOptimisticSequencerBlock.Load()), "BaseSequencerBlockHash should match the current optimistic sequencer block hash") } } @@ -375,12 +377,12 @@ func TestExecutionServiceServerV1Alpha2_StreamExecuteOptimisticBlock(t *testing. 
previousBlock := ethservice.BlockChain().CurrentSafeBlock() require.NotNil(t, previousBlock, "Previous block not found") - requestStreams := []*optimsticPb.StreamExecuteOptimisticBlockRequest{} + requestStreams := []*optimsticPb.ExecuteOptimisticBlockStreamRequest{} sequencerBlockHash := []byte("sequencer_block_hash") // create 1 stream item with 5 txs txs := []*types.Transaction{} - marshalledTxs := []*sequencerblockv1alpha1.RollupData{} + marshalledTxs := []*sequencerblockv1.RollupData{} for i := 0; i < 5; i++ { unsignedTx := types.NewTransaction(uint64(i), shared.TestToAddress, big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil) tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), shared.TestKey) @@ -389,8 +391,8 @@ func TestExecutionServiceServerV1Alpha2_StreamExecuteOptimisticBlock(t *testing. marshalledTx, err := tx.MarshalBinary() require.Nil(t, err, "Failed to marshal tx") - marshalledTxs = append(marshalledTxs, &sequencerblockv1alpha1.RollupData{ - Value: &sequencerblockv1alpha1.RollupData_SequencedData{SequencedData: marshalledTx}, + marshalledTxs = append(marshalledTxs, &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{SequencedData: marshalledTx}, }) } @@ -403,7 +405,7 @@ func TestExecutionServiceServerV1Alpha2_StreamExecuteOptimisticBlock(t *testing. require.Equal(t, pending, 5, "Mempool should have 5 pending txs") require.Equal(t, queued, 0, "Mempool should have 0 queued txs") - req := optimsticPb.StreamExecuteOptimisticBlockRequest{Block: &optimsticPb.BaseBlock{ + req := optimsticPb.ExecuteOptimisticBlockStreamRequest{BaseBlock: &optimsticPb.BaseBlock{ SequencerBlockHash: sequencerBlockHash, Transactions: marshalledTxs, Timestamp: ×tamppb.Timestamp{ @@ -413,9 +415,9 @@ func TestExecutionServiceServerV1Alpha2_StreamExecuteOptimisticBlock(t *testing. requestStreams = append(requestStreams, &req) - mockStream := &MockBidirectionalStreaming[optimsticPb.StreamExecuteOptimisticBlockRequest, optimsticPb.StreamExecuteOptimisticBlockResponse]{ + mockStream := &MockBidirectionalStreaming[optimsticPb.ExecuteOptimisticBlockStreamRequest, optimsticPb.ExecuteOptimisticBlockStreamResponse]{ requestStream: requestStreams, - accumulatedResponses: []*optimsticPb.StreamExecuteOptimisticBlockResponse{}, + accumulatedResponses: []*optimsticPb.ExecuteOptimisticBlockStreamResponse{}, requestCounter: 0, } diff --git a/grpc/shared/container.go b/grpc/shared/container.go index da11722db..04e4568e5 100644 --- a/grpc/shared/container.go +++ b/grpc/shared/container.go @@ -23,7 +23,6 @@ type SharedServiceContainer struct { bridgeAddresses map[string]*params.AstriaBridgeAddressConfig // astria bridge addess to config for that bridge account bridgeAllowedAssets map[string]struct{} // a set of allowed asset IDs structs are left empty - bridgeSenderAddress common.Address // address from which AstriaBridgeableERC20 contracts are called // TODO: bharath - we could make this an atomic pointer??? 
nextFeeRecipient common.Address // Fee recipient for the next block @@ -67,11 +66,12 @@ func NewSharedServiceContainer(eth *eth.Ethereum) (*SharedServiceContainer, erro nativeBridgeSeen = true } - if cfg.Erc20Asset != nil && bc.Config().AstriaBridgeSenderAddress == (common.Address{}) { + if cfg.Erc20Asset != nil && cfg.SenderAddress == (common.Address{}) { return nil, errors.New("astria bridge sender address must be set for bridged ERC20 assets") } - bridgeAddresses[cfg.BridgeAddress] = &cfg + bridgeCfg := cfg + bridgeAddresses[cfg.BridgeAddress] = &bridgeCfg bridgeAllowedAssets[cfg.AssetDenom] = struct{}{} if cfg.Erc20Asset == nil { log.Info("bridge for sequencer native asset initialized", "bridgeAddress", cfg.BridgeAddress, "assetDenom", cfg.AssetDenom) @@ -96,13 +96,11 @@ func NewSharedServiceContainer(eth *eth.Ethereum) (*SharedServiceContainer, erro } } } - sharedServiceContainer := &SharedServiceContainer{ eth: eth, bc: bc, bridgeAddresses: bridgeAddresses, bridgeAllowedAssets: bridgeAllowedAssets, - bridgeSenderAddress: bc.Config().AstriaBridgeSenderAddress, nextFeeRecipient: nextFeeRecipient, } @@ -161,7 +159,3 @@ func (s *SharedServiceContainer) BridgeAddresses() map[string]*params.AstriaBrid func (s *SharedServiceContainer) BridgeAllowedAssets() map[string]struct{} { return s.bridgeAllowedAssets } - -func (s *SharedServiceContainer) BridgeSenderAddress() common.Address { - return s.bridgeSenderAddress -} diff --git a/grpc/shared/validation_test.go b/grpc/shared/validation_test.go index e27db7aab..a46032f4e 100644 --- a/grpc/shared/validation_test.go +++ b/grpc/shared/validation_test.go @@ -202,11 +202,7 @@ func TestSequenceTxValidation(t *testing.T) { for _, test := range tests { t.Run(test.description, func(t *testing.T) { -<<<<<<< HEAD:grpc/execution/validation_test.go - _, err := validateAndUnmarshalSequencerTx(2, test.sequencerTx, serviceV1Alpha1.bridgeAddresses, serviceV1Alpha1.bridgeAllowedAssets) -======= - _, err := ValidateAndUnmarshalSequencerTx(2, test.sequencerTx, serviceV1Alpha1.BridgeAddresses(), serviceV1Alpha1.BridgeAllowedAssets(), common.Address{}) ->>>>>>> 21f5aa7f7 (separate out execution api services and optimistic execution api services):grpc/shared/validation_test.go + _, err := ValidateAndUnmarshalSequencerTx(2, test.sequencerTx, serviceV1Alpha1.BridgeAddresses(), serviceV1Alpha1.BridgeAllowedAssets()) if test.wantErr == "" && err == nil { return } From e5065e5c58aca17d4c1997294e3a84078f93b926 Mon Sep 17 00:00:00 2001 From: Bharath Date: Tue, 5 Nov 2024 11:48:48 +0530 Subject: [PATCH 43/79] update grpc methods --- grpc/optimistic/server.go | 4 ++-- grpc/optimistic/server_test.go | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/grpc/optimistic/server.go b/grpc/optimistic/server.go index 3d3f57ba2..74adefc75 100644 --- a/grpc/optimistic/server.go +++ b/grpc/optimistic/server.go @@ -54,7 +54,7 @@ func NewOptimisticServiceV1Alpha(sharedServiceContainer *shared.SharedServiceCon return optimisticService } -func (o *OptimisticServiceV1Alpha1) StreamBundles(_ *optimsticPb.GetBundleStreamRequest, stream optimisticGrpc.BundleService_GetBundleStreamServer) error { +func (o *OptimisticServiceV1Alpha1) GetBundleStream(_ *optimsticPb.GetBundleStreamRequest, stream optimisticGrpc.BundleService_GetBundleStreamServer) error { pendingTxEventCh := make(chan core.NewTxsEvent) pendingTxEvent := o.Eth().TxPool().SubscribeTransactions(pendingTxEventCh, false) defer pendingTxEvent.Unsubscribe() @@ -97,7 +97,7 @@ func (o *OptimisticServiceV1Alpha1) 
StreamBundles(_ *optimsticPb.GetBundleStream } } -func (o *OptimisticServiceV1Alpha1) StreamExecuteOptimisticBlock(stream optimisticGrpc.OptimisticExecutionService_ExecuteOptimisticBlockStreamServer) error { +func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlockStream(stream optimisticGrpc.OptimisticExecutionService_ExecuteOptimisticBlockStreamServer) error { mempoolClearingEventCh := make(chan core.NewMempoolCleared) mempoolClearingEvent := o.Eth().TxPool().SubscribeMempoolClearance(mempoolClearingEventCh) defer mempoolClearingEvent.Unsubscribe() diff --git a/grpc/optimistic/server_test.go b/grpc/optimistic/server_test.go index 730c4d22b..4addac955 100644 --- a/grpc/optimistic/server_test.go +++ b/grpc/optimistic/server_test.go @@ -260,7 +260,7 @@ func TestNewExecutionServiceServerV1Alpha2_StreamBundles(t *testing.T) { errorCh := make(chan error) go func(errorCh chan error) { - errorCh <- optimisticServiceV1Alpha1.StreamExecuteOptimisticBlock(mockBidirectionalStream) + errorCh <- optimisticServiceV1Alpha1.ExecuteOptimisticBlockStream(mockBidirectionalStream) }(errorCh) select { @@ -292,7 +292,7 @@ func TestNewExecutionServiceServerV1Alpha2_StreamBundles(t *testing.T) { errorCh = make(chan error) go func() { - errorCh <- optimisticServiceV1Alpha1.StreamBundles(&optimsticPb.GetBundleStreamRequest{}, &mockServerSideStreaming) + errorCh <- optimisticServiceV1Alpha1.GetBundleStream(&optimsticPb.GetBundleStreamRequest{}, &mockServerSideStreaming) }() stateDb, err := ethservice.BlockChain().StateAt(currentOptimisticBlock.Root) @@ -423,7 +423,7 @@ func TestExecutionServiceServerV1Alpha2_StreamExecuteOptimisticBlock(t *testing. errorCh := make(chan error) go func(errorCh chan error) { - errorCh <- optimisticServiceV1Alpha1.StreamExecuteOptimisticBlock(mockStream) + errorCh <- optimisticServiceV1Alpha1.ExecuteOptimisticBlockStream(mockStream) }(errorCh) select { From 0af0e924dfd3c3fb5d3ec556b8f85abf484736c3 Mon Sep 17 00:00:00 2001 From: Bharath Date: Mon, 11 Nov 2024 13:44:56 +0530 Subject: [PATCH 44/79] close the bundle stream when client closes the connection --- grpc/optimistic/server.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/grpc/optimistic/server.go b/grpc/optimistic/server.go index 74adefc75..d96e6a876 100644 --- a/grpc/optimistic/server.go +++ b/grpc/optimistic/server.go @@ -93,6 +93,10 @@ func (o *OptimisticServiceV1Alpha1) GetBundleStream(_ *optimsticPb.GetBundleStre case err := <-pendingTxEvent.Err(): return status.Errorf(codes.Internal, "error waiting for pending transactions: %v", err) + + case <-stream.Context().Done(): + log.Debug("GetBundleStream stream closed with error", "err", stream.Context().Err()) + return stream.Context().Err() } } } From 258c8e21b02915d92b8ee87a36db5480a69c7451 Mon Sep 17 00:00:00 2001 From: Bharath Date: Tue, 12 Nov 2024 16:55:50 +0530 Subject: [PATCH 45/79] remove UDS references --- cmd/geth/main.go | 1 - cmd/utils/flags.go | 9 --------- node/config.go | 21 +++++++-------------- node/config_test.go | 2 +- node/defaults.go | 5 ++--- node/grpcstack.go | 31 ++++++++----------------------- node/node.go | 11 +++-------- 7 files changed, 21 insertions(+), 59 deletions(-) diff --git a/cmd/geth/main.go b/cmd/geth/main.go index f8c40bc58..162655190 100644 --- a/cmd/geth/main.go +++ b/cmd/geth/main.go @@ -191,7 +191,6 @@ var ( utils.GRPCEnabledFlag, utils.GRPCHostFlag, utils.GRPCPortFlag, - utils.GRPCUdsFlag, } metricsFlags = []cli.Flag{ diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 188a71184..4d55de18f 100644 --- 
a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -769,12 +769,6 @@ var ( Value: node.DefaultGRPCPort, Category: flags.APICategory, } - GRPCUdsFlag = &cli.StringFlag{ - Name: "grpc.uds", - Usage: "gRPC server UDS socket", - Value: node.DefaultGRPCUdsSocket, - Category: flags.APICategory, - } // Network Settings MaxPeersFlag = &cli.IntFlag{ @@ -1223,9 +1217,6 @@ func setGRPC(ctx *cli.Context, cfg *node.Config) { if ctx.IsSet(GRPCPortFlag.Name) { cfg.GRPCPort = ctx.Int(GRPCPortFlag.Name) } - if ctx.IsSet(GRPCUdsFlag.Name) { - cfg.GRPCUds = ctx.String(GRPCUdsFlag.Name) - } } } diff --git a/node/config.go b/node/config.go index 87013c002..9f83540b2 100644 --- a/node/config.go +++ b/node/config.go @@ -195,8 +195,6 @@ type Config struct { GRPCHost string `toml:",omitempty"` // GRPCPort is the TCP port number on which to start the gRPC server. GRPCPort int `toml:",omitempty"` - // GRPCUds is the Unix domain socket path on which to start the gRPC server. - GRPCUds string `toml:",omitempty"` // Logger is a custom logger to use with the p2p.Server. Logger log.Logger `toml:",omitempty"` @@ -275,35 +273,30 @@ func (c *Config) HTTPEndpoint() string { return net.JoinHostPort(c.HTTPHost, fmt.Sprintf("%d", c.HTTPPort)) } -// GRPCTcpEndpoint resolves a gRPC TCP endpoint based on the configured host interface +// GRPCEndpoint resolves a gRPC TCP endpoint based on the configured host interface // and port parameters. -func (c *Config) GRPCTcpEndpoint() string { +func (c *Config) GRPCEndpoint() string { if c.GRPCHost == "" { return "" } return fmt.Sprintf("%s:%d", c.GRPCHost, c.GRPCPort) } -// GRPCUdsEndpoint resolves a gRPC Unix domain socket endpoint based on the configured path. -func (c *Config) GRPCUdsEndpoint() string { - return c.GRPCUds -} - -// DefaultHTTPEndpoint returns the HTTP tcpEndpoint used by default. +// DefaultHTTPEndpoint returns the HTTP endpoint used by default. func DefaultHTTPEndpoint() string { config := &Config{HTTPHost: DefaultHTTPHost, HTTPPort: DefaultHTTPPort, AuthPort: DefaultAuthPort} return config.HTTPEndpoint() } -// DefaultGRPCEndpoint returns the gRPC tcpEndpoint used by default. +// DefaultGRPCEndpoint returns the gRPC endpoint used by default. // NOTE - implemented this to be consistent with DefaultHTTPEndpoint, but // neither are ever used func DefaultGRPCEndpoint() string { config := &Config{GRPCHost: DefaultGRPCHost, GRPCPort: DefaultGRPCPort} - return config.GRPCTcpEndpoint() + return config.GRPCEndpoint() } -// WSEndpoint resolves a websocket tcpEndpoint based on the configured host interface +// WSEndpoint resolves a websocket endpoint based on the configured host interface // and port parameters. func (c *Config) WSEndpoint() string { if c.WSHost == "" { @@ -312,7 +305,7 @@ func (c *Config) WSEndpoint() string { return net.JoinHostPort(c.WSHost, fmt.Sprintf("%d", c.WSPort)) } -// DefaultWSEndpoint returns the websocket tcpEndpoint used by default. +// DefaultWSEndpoint returns the websocket endpoint used by default. 
func DefaultWSEndpoint() string { config := &Config{WSHost: DefaultWSHost, WSPort: DefaultWSPort} return config.WSEndpoint() diff --git a/node/config_test.go b/node/config_test.go index 9cfda04d6..e8af8ddcd 100644 --- a/node/config_test.go +++ b/node/config_test.go @@ -94,7 +94,7 @@ func TestIPCPathResolution(t *testing.T) { // Only run when platform/test match if (runtime.GOOS == "windows") == test.Windows { if endpoint := (&Config{DataDir: test.DataDir, IPCPath: test.IPCPath}).IPCEndpoint(); endpoint != test.Endpoint { - t.Errorf("test %d: IPC tcpEndpoint mismatch: have %s, want %s", i, endpoint, test.Endpoint) + t.Errorf("test %d: IPC endpoint mismatch: have %s, want %s", i, endpoint, test.Endpoint) } } } diff --git a/node/defaults.go b/node/defaults.go index 0c0d2c935..fd3d45e01 100644 --- a/node/defaults.go +++ b/node/defaults.go @@ -35,9 +35,8 @@ const ( DefaultAuthHost = "localhost" // Default host interface for the authenticated apis DefaultAuthPort = 8551 // Default port for the authenticated apis // grpc - DefaultGRPCHost = "[::1]" // Default host interface for the gRPC server for the execution api - DefaultGRPCPort = 50051 // Default port for the gRPC server for the execution api - DefaultGRPCUdsSocket = "/tmp/auctioneer.sock" // Default UDS socket for the gRPC auctioneer streams + DefaultGRPCHost = "[::1]" // Default host interface for the gRPC server for the execution api + DefaultGRPCPort = 50051 // Default port for the gRPC server for the execution api ) const ( diff --git a/node/grpcstack.go b/node/grpcstack.go index 9b4d3ffeb..52281eb42 100644 --- a/node/grpcstack.go +++ b/node/grpcstack.go @@ -3,7 +3,6 @@ package node import ( optimisticGrpc "buf.build/gen/go/astria/execution-apis/grpc/go/astria/bundle/v1alpha1/bundlev1alpha1grpc" "net" - "os" "sync" astriaGrpc "buf.build/gen/go/astria/execution-apis/grpc/go/astria/execution/v1/executionv1grpc" @@ -16,8 +15,7 @@ import ( type GRPCServerHandler struct { mu sync.Mutex - tcpEndpoint string - udsEndpoint string + endpoint string execServer *grpc.Server optimisticServer *grpc.Server executionServiceServerV1a2 *astriaGrpc.ExecutionServiceServer @@ -31,11 +29,10 @@ type GRPCServerHandler struct { func NewGRPCServerHandler(node *Node, execServ astriaGrpc.ExecutionServiceServer, optimisticExecServ optimisticGrpc.OptimisticExecutionServiceServer, streamBundleServ optimisticGrpc.BundleServiceServer, cfg *Config) error { execServer, optimisticServer := grpc.NewServer(), grpc.NewServer() - log.Info("gRPC server enabled", "tcpEndpoint", cfg.GRPCTcpEndpoint(), "udsEndpoint", cfg.GRPCUdsEndpoint()) + log.Info("gRPC server enabled", "endpoint", cfg.GRPCEndpoint()) serverHandler := &GRPCServerHandler{ - tcpEndpoint: cfg.GRPCTcpEndpoint(), - udsEndpoint: cfg.GRPCUdsEndpoint(), + endpoint: cfg.GRPCEndpoint(), execServer: execServer, optimisticServer: optimisticServer, executionServiceServerV1a2: &execServ, @@ -56,31 +53,19 @@ func (handler *GRPCServerHandler) Start() error { handler.mu.Lock() defer handler.mu.Unlock() - if handler.tcpEndpoint == "" { - return nil - } - if handler.udsEndpoint == "" { + if handler.endpoint == "" { return nil } // Start the gRPC server - tcpLis, err := net.Listen("tcp", handler.tcpEndpoint) - if err != nil { - return err - } - - // Remove any existing socket file - if err := os.RemoveAll(handler.udsEndpoint); err != nil { - return err - } - udsLis, err := net.Listen("unix", handler.udsEndpoint) + tcpLis, err := net.Listen("tcp", handler.endpoint) if err != nil { return err } go handler.execServer.Serve(tcpLis) 
- go handler.optimisticServer.Serve(udsLis) - log.Info("gRPC server started", "tcpEndpoint", handler.tcpEndpoint, "udsEndpoint", handler.udsEndpoint) + go handler.optimisticServer.Serve(tcpLis) + log.Info("gRPC server started", "endpoint", handler.endpoint) return nil } @@ -91,6 +76,6 @@ func (handler *GRPCServerHandler) Stop() error { handler.execServer.GracefulStop() handler.optimisticServer.GracefulStop() - log.Info("gRPC server stopped", "tcpEndpoint", handler.tcpEndpoint, "udsEndpoint", handler.udsEndpoint) + log.Info("gRPC server stopped", "endpoint", handler.endpoint) return nil } diff --git a/node/node.go b/node/node.go index 02a91a8ba..dc56b3361 100644 --- a/node/node.go +++ b/node/node.go @@ -724,14 +724,9 @@ func (n *Node) HTTPEndpoint() string { return "http://" + n.http.listenAddr() } -// GRPCTcpEndpoint returns the URL of the GRPC server. -func (n *Node) GRPCTcpEndpoint() string { - return "http://" + n.grpcServerHandler.tcpEndpoint -} - -// GRPCUdsEndpoint returns the URL of the GRPC server UDS endpoint -func (n *Node) GRPCUdsEndpoint() string { - return n.grpcServerHandler.udsEndpoint +// GRPCEndpoint returns the URL of the GRPC server. +func (n *Node) GRPCEndpoint() string { + return "http://" + n.grpcServerHandler.endpoint } // WSEndpoint returns the current JSON-RPC over WebSocket endpoint. From 54381f4206014d949a009aa7dc32a00f5dd0b9fc Mon Sep 17 00:00:00 2001 From: Bharath Date: Tue, 12 Nov 2024 16:57:53 +0530 Subject: [PATCH 46/79] rename a wrongly renamed word --- core/vm/memory.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/vm/memory.go b/core/vm/memory.go index 3ab292000..e0202fd7c 100644 --- a/core/vm/memory.go +++ b/core/vm/memory.go @@ -34,7 +34,7 @@ func NewMemory() *Memory { // Set sets offset + size to value func (m *Memory) Set(offset, size uint64, value []byte) { // It's possible the offset is greater than 0 and size equals 0. This is because - // the calcMemSize (container.go) could potentially return 0 when size is zero (NO-OP) + // the calcMemSize (common.go) could potentially return 0 when size is zero (NO-OP) if size > 0 { // length of store may never be less than offset + size. 
// The store should be resized PRIOR to setting the memory From f28ac18c9c164890bc36d062d86f166af33c6c5f Mon Sep 17 00:00:00 2001 From: Bharath Date: Tue, 12 Nov 2024 17:14:48 +0530 Subject: [PATCH 47/79] re add single client connection checks --- grpc/optimistic/server.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/grpc/optimistic/server.go b/grpc/optimistic/server.go index d96e6a876..18e90b160 100644 --- a/grpc/optimistic/server.go +++ b/grpc/optimistic/server.go @@ -35,6 +35,9 @@ type OptimisticServiceV1Alpha1 struct { sharedServiceContainer *shared.SharedServiceContainer currentOptimisticSequencerBlock atomic.Pointer[[]byte] + + executeOptimisticBlockStreamConnected atomic.Bool + bundleStreamConnected atomic.Bool } var ( @@ -55,6 +58,13 @@ func NewOptimisticServiceV1Alpha(sharedServiceContainer *shared.SharedServiceCon } func (o *OptimisticServiceV1Alpha1) GetBundleStream(_ *optimsticPb.GetBundleStreamRequest, stream optimisticGrpc.BundleService_GetBundleStreamServer) error { + if !o.bundleStreamConnected.CompareAndSwap(false, true) { + return status.Error(codes.PermissionDenied, "Bundle stream has already been connected to") + } + + // when the stream is closed, we need to set the bundleStreamConnected to false + defer o.bundleStreamConnected.Store(false) + pendingTxEventCh := make(chan core.NewTxsEvent) pendingTxEvent := o.Eth().TxPool().SubscribeTransactions(pendingTxEventCh, false) defer pendingTxEvent.Unsubscribe() @@ -102,6 +112,13 @@ func (o *OptimisticServiceV1Alpha1) GetBundleStream(_ *optimsticPb.GetBundleStre } func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlockStream(stream optimisticGrpc.OptimisticExecutionService_ExecuteOptimisticBlockStreamServer) error { + if !o.executeOptimisticBlockStreamConnected.CompareAndSwap(false, true) { + return status.Error(codes.PermissionDenied, "Execute optimsitic block stream has already been connected to") + } + + // when the stream is closed, we need to set the bundleStreamConnected to false + defer o.executeOptimisticBlockStreamConnected.Store(false) + mempoolClearingEventCh := make(chan core.NewMempoolCleared) mempoolClearingEvent := o.Eth().TxPool().SubscribeMempoolClearance(mempoolClearingEventCh) defer mempoolClearingEvent.Unsubscribe() From 12eec4dc2bf0b48039343cc81990eed7967056a7 Mon Sep 17 00:00:00 2001 From: Bharath Date: Tue, 12 Nov 2024 20:53:30 +0530 Subject: [PATCH 48/79] maintain only 1 server instance --- node/grpcstack.go | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/node/grpcstack.go b/node/grpcstack.go index 52281eb42..2f8d7b091 100644 --- a/node/grpcstack.go +++ b/node/grpcstack.go @@ -17,7 +17,6 @@ type GRPCServerHandler struct { endpoint string execServer *grpc.Server - optimisticServer *grpc.Server executionServiceServerV1a2 *astriaGrpc.ExecutionServiceServer optimisticExecServ *optimisticGrpc.OptimisticExecutionServiceServer streamBundleServ *optimisticGrpc.BundleServiceServer @@ -27,22 +26,21 @@ type GRPCServerHandler struct { // It registers the execution service server. // It registers the gRPC server with the node so it can be stopped on shutdown. 
func NewGRPCServerHandler(node *Node, execServ astriaGrpc.ExecutionServiceServer, optimisticExecServ optimisticGrpc.OptimisticExecutionServiceServer, streamBundleServ optimisticGrpc.BundleServiceServer, cfg *Config) error { - execServer, optimisticServer := grpc.NewServer(), grpc.NewServer() + execServer := grpc.NewServer() log.Info("gRPC server enabled", "endpoint", cfg.GRPCEndpoint()) serverHandler := &GRPCServerHandler{ endpoint: cfg.GRPCEndpoint(), execServer: execServer, - optimisticServer: optimisticServer, executionServiceServerV1a2: &execServ, optimisticExecServ: &optimisticExecServ, streamBundleServ: &streamBundleServ, } astriaGrpc.RegisterExecutionServiceServer(execServer, execServ) - optimisticGrpc.RegisterOptimisticExecutionServiceServer(optimisticServer, optimisticExecServ) - optimisticGrpc.RegisterBundleServiceServer(optimisticServer, streamBundleServ) + optimisticGrpc.RegisterOptimisticExecutionServiceServer(execServer, optimisticExecServ) + optimisticGrpc.RegisterBundleServiceServer(execServer, streamBundleServ) node.RegisterGRPCServer(serverHandler) return nil @@ -64,7 +62,6 @@ func (handler *GRPCServerHandler) Start() error { } go handler.execServer.Serve(tcpLis) - go handler.optimisticServer.Serve(tcpLis) log.Info("gRPC server started", "endpoint", handler.endpoint) return nil } @@ -75,7 +72,6 @@ func (handler *GRPCServerHandler) Stop() error { defer handler.mu.Unlock() handler.execServer.GracefulStop() - handler.optimisticServer.GracefulStop() log.Info("gRPC server stopped", "endpoint", handler.endpoint) return nil } From 651a6caef293a30efcae0f96819b7483cce359f6 Mon Sep 17 00:00:00 2001 From: Bharath Date: Wed, 20 Nov 2024 12:34:43 +0530 Subject: [PATCH 49/79] renaming --- grpc/execution/server.go | 4 ++-- grpc/optimistic/server_test.go | 20 ++++++++++---------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/grpc/execution/server.go b/grpc/execution/server.go index 3f4d5c5be..8f65765d6 100644 --- a/grpc/execution/server.go +++ b/grpc/execution/server.go @@ -62,11 +62,11 @@ var ( ) func NewExecutionServiceServerV1(sharedServiceContainer *shared.SharedServiceContainer) *ExecutionServiceServerV1 { - execServiceServerV1Alpha2 := &ExecutionServiceServerV1{ + execServiceServerV1 := &ExecutionServiceServerV1{ sharedServiceContainer: sharedServiceContainer, } - return execServiceServerV1Alpha2 + return execServiceServerV1 } func (s *ExecutionServiceServerV1) GetGenesisInfo(ctx context.Context, req *astriaPb.GetGenesisInfoRequest) (*astriaPb.GenesisInfo, error) { diff --git a/grpc/optimistic/server_test.go b/grpc/optimistic/server_test.go index 4addac955..cbcb562fc 100644 --- a/grpc/optimistic/server_test.go +++ b/grpc/optimistic/server_test.go @@ -70,18 +70,18 @@ func TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlock(t *testing.T) { // reset the blockchain with each test optimisticServiceV1Alpha1 := SetupOptimisticService(t, sharedService) - executionServiceV1Alpha1 := execution.SetupExecutionService(t, sharedService) + executionServiceV1 := execution.SetupExecutionService(t, sharedService) var err error // adding this to prevent shadowing of genesisInfo in the below if branch var genesisInfo *astriaPb.GenesisInfo var commitmentStateBeforeExecuteBlock *astriaPb.CommitmentState if tt.callGenesisInfoAndGetCommitmentState { // call getGenesisInfo and getCommitmentState before calling executeBlock - genesisInfo, err = executionServiceV1Alpha1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{}) + genesisInfo, err = 
executionServiceV1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{}) require.Nil(t, err, "GetGenesisInfo failed") require.NotNil(t, genesisInfo, "GenesisInfo is nil") - commitmentStateBeforeExecuteBlock, err = executionServiceV1Alpha1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) + commitmentStateBeforeExecuteBlock, err = executionServiceV1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) require.Nil(t, err, "GetCommitmentState failed") require.NotNil(t, commitmentStateBeforeExecuteBlock, "CommitmentState is nil") } @@ -158,7 +158,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlock(t *testing.T) { require.Equal(t, 0, astriaOrdered.Len(), "AstriaOrdered should be empty") // check if commitment state is not updated - commitmentStateAfterExecuteBlock, err := executionServiceV1Alpha1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) + commitmentStateAfterExecuteBlock, err := executionServiceV1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) require.Nil(t, err, "GetCommitmentState failed") require.Exactly(t, commitmentStateBeforeExecuteBlock, commitmentStateAfterExecuteBlock, "Commitment state should not be updated") @@ -197,15 +197,15 @@ func TestNewExecutionServiceServerV1Alpha2_StreamBundles(t *testing.T) { ethservice, sharedService := shared.SetupSharedService(t, 10) optimisticServiceV1Alpha1 := SetupOptimisticService(t, sharedService) - executionServiceV1Alpha1 := execution.SetupExecutionService(t, sharedService) + executionServiceV1 := execution.SetupExecutionService(t, sharedService) // call genesis info - genesisInfo, err := executionServiceV1Alpha1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{}) + genesisInfo, err := executionServiceV1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{}) require.Nil(t, err, "GetGenesisInfo failed") require.NotNil(t, genesisInfo, "GenesisInfo is nil") // call get commitment state - commitmentState, err := executionServiceV1Alpha1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) + commitmentState, err := executionServiceV1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) require.Nil(t, err, "GetCommitmentState failed") require.NotNil(t, commitmentState, "CommitmentState is nil") @@ -361,15 +361,15 @@ func TestExecutionServiceServerV1Alpha2_StreamExecuteOptimisticBlock(t *testing. 
ethservice, sharedService := shared.SetupSharedService(t, 10) optimisticServiceV1Alpha1 := SetupOptimisticService(t, sharedService) - executionServiceV1Alpha1 := execution.SetupExecutionService(t, sharedService) + executionServiceV1 := execution.SetupExecutionService(t, sharedService) // call genesis info - genesisInfo, err := executionServiceV1Alpha1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{}) + genesisInfo, err := executionServiceV1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{}) require.Nil(t, err, "GetGenesisInfo failed") require.NotNil(t, genesisInfo, "GenesisInfo is nil") // call get commitment state - commitmentState, err := executionServiceV1Alpha1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) + commitmentState, err := executionServiceV1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) require.Nil(t, err, "GetCommitmentState failed") require.NotNil(t, commitmentState, "CommitmentState is nil") From 9bf573a0bc1ada31511bce8be8182f84a30b39a2 Mon Sep 17 00:00:00 2001 From: Bharath Date: Tue, 3 Dec 2024 13:07:58 +0530 Subject: [PATCH 50/79] remove atomic bools to restrict client connections to 1 --- grpc/optimistic/server.go | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/grpc/optimistic/server.go b/grpc/optimistic/server.go index 18e90b160..d96e6a876 100644 --- a/grpc/optimistic/server.go +++ b/grpc/optimistic/server.go @@ -35,9 +35,6 @@ type OptimisticServiceV1Alpha1 struct { sharedServiceContainer *shared.SharedServiceContainer currentOptimisticSequencerBlock atomic.Pointer[[]byte] - - executeOptimisticBlockStreamConnected atomic.Bool - bundleStreamConnected atomic.Bool } var ( @@ -58,13 +55,6 @@ func NewOptimisticServiceV1Alpha(sharedServiceContainer *shared.SharedServiceCon } func (o *OptimisticServiceV1Alpha1) GetBundleStream(_ *optimsticPb.GetBundleStreamRequest, stream optimisticGrpc.BundleService_GetBundleStreamServer) error { - if !o.bundleStreamConnected.CompareAndSwap(false, true) { - return status.Error(codes.PermissionDenied, "Bundle stream has already been connected to") - } - - // when the stream is closed, we need to set the bundleStreamConnected to false - defer o.bundleStreamConnected.Store(false) - pendingTxEventCh := make(chan core.NewTxsEvent) pendingTxEvent := o.Eth().TxPool().SubscribeTransactions(pendingTxEventCh, false) defer pendingTxEvent.Unsubscribe() @@ -112,13 +102,6 @@ func (o *OptimisticServiceV1Alpha1) GetBundleStream(_ *optimsticPb.GetBundleStre } func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlockStream(stream optimisticGrpc.OptimisticExecutionService_ExecuteOptimisticBlockStreamServer) error { - if !o.executeOptimisticBlockStreamConnected.CompareAndSwap(false, true) { - return status.Error(codes.PermissionDenied, "Execute optimsitic block stream has already been connected to") - } - - // when the stream is closed, we need to set the bundleStreamConnected to false - defer o.executeOptimisticBlockStreamConnected.Store(false) - mempoolClearingEventCh := make(chan core.NewMempoolCleared) mempoolClearingEvent := o.Eth().TxPool().SubscribeMempoolClearance(mempoolClearingEventCh) defer mempoolClearingEvent.Unsubscribe() From 6abc0f4ebfff7749bfd876459213ac4d0b369539 Mon Sep 17 00:00:00 2001 From: Bharath Date: Tue, 12 Nov 2024 20:54:21 +0530 Subject: [PATCH 51/79] feature flag auctioneer --- cmd/utils/flags.go | 7 +++ core/txpool/legacypool/legacypool.go | 53 ++++++++++++++-------- 
core/txpool/legacypool/legacypool2_test.go | 8 ++-- core/txpool/legacypool/legacypool_test.go | 38 ++++++++-------- core/txpool/txpool.go | 32 +++++++++---- eth/backend.go | 4 +- eth/protocols/eth/handler_test.go | 4 +- grpc/shared/test_utils.go | 4 +- miner/miner_test.go | 19 ++++---- miner/payload_building_test.go | 4 +- node/config.go | 2 + node/grpcstack.go | 11 ++++- node/node.go | 10 ++++ 13 files changed, 129 insertions(+), 67 deletions(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 4d55de18f..6a01d0961 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -770,6 +770,13 @@ var ( Category: flags.APICategory, } + // auctioneer + AuctioneerEnabledFlag = &cli.BoolFlag{ + Name: "auctioneer", + Usage: "Enable the auctioneer server", + Category: flags.MinerCategory, + } + // Network Settings MaxPeersFlag = &cli.IntFlag{ Name: "maxpeers", diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index b6e769452..81dfb958e 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -239,6 +239,8 @@ type LegacyPool struct { initDoneCh chan struct{} // is closed once the pool is initialized (for tests) changesSinceReorg int // A counter for how many drops we've performed in-between reorg. + + auctioneerEnabled bool } type txpoolResetRequest struct { @@ -247,26 +249,27 @@ type txpoolResetRequest struct { // New creates a new transaction pool to gather, sort and filter inbound // transactions from the network. -func New(config Config, chain BlockChain) *LegacyPool { +func New(config Config, chain BlockChain, auctioneerEnabled bool) *LegacyPool { // Sanitize the input to ensure no vulnerable gas prices are set config = (&config).sanitize() // Create the transaction pool with its initial settings pool := &LegacyPool{ - config: config, - chain: chain, - chainconfig: chain.Config(), - signer: types.LatestSigner(chain.Config()), - pending: make(map[common.Address]*list), - queue: make(map[common.Address]*list), - beats: make(map[common.Address]time.Time), - all: newLookup(), - reqResetCh: make(chan *txpoolResetRequest), - reqPromoteCh: make(chan *accountSet), - queueTxEventCh: make(chan *types.Transaction), - reorgDoneCh: make(chan chan struct{}), - reorgShutdownCh: make(chan struct{}), - initDoneCh: make(chan struct{}), + config: config, + chain: chain, + chainconfig: chain.Config(), + signer: types.LatestSigner(chain.Config()), + pending: make(map[common.Address]*list), + queue: make(map[common.Address]*list), + beats: make(map[common.Address]time.Time), + all: newLookup(), + reqResetCh: make(chan *txpoolResetRequest), + reqPromoteCh: make(chan *accountSet), + queueTxEventCh: make(chan *types.Transaction), + reorgDoneCh: make(chan chan struct{}), + reorgShutdownCh: make(chan struct{}), + initDoneCh: make(chan struct{}), + auctioneerEnabled: auctioneerEnabled, } pool.locals = newAccountSet(pool.signer) for _, addr := range config.Locals { @@ -1373,8 +1376,16 @@ func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest, } pool.mu.Lock() if reset != nil { - // Reset from the old head to the new, rescheduling any reorged transactions - pool.reset(reset.oldHead, reset.newHead) + // only reset the state root and the head of the txpool when we are running the auctioneer node. 
+ // when we are not running the auctioneer node, we re-inject any re-orged transactions which is similar + // to the current functionality of geth + if pool.auctioneerEnabled { + // only reset from the old head to the new head + pool.resetHeadOnly(reset.oldHead, reset.newHead) + } else { + // Reset from the old head to the new, rescheduling any reorged transactions + pool.reset(reset.oldHead, reset.newHead) + } // Nonces were reset, discard any events that became stale for addr := range events { @@ -1395,7 +1406,13 @@ func (pool *LegacyPool) runReorg(done chan struct{}, reset *txpoolResetRequest, // remove any transaction that has been included in the block or was invalidated // because of another transaction (e.g. higher gas price). if reset != nil { - pool.clearPendingAndQueued(reset.newHead) + if pool.auctioneerEnabled { + // if we are running the pool as an auctioneer, then we should clear the mempool each time the head + // is reset + pool.clearPendingAndQueued(reset.newHead) + } else { + pool.demoteUnexecutables() + } if reset.newHead != nil { if pool.chainconfig.IsLondon(new(big.Int).Add(reset.newHead.Number, big.NewInt(1))) { pendingBaseFee := eip1559.CalcBaseFee(pool.chainconfig, reset.newHead) diff --git a/core/txpool/legacypool/legacypool2_test.go b/core/txpool/legacypool/legacypool2_test.go index fd961d1d9..d0e1d0e04 100644 --- a/core/txpool/legacypool/legacypool2_test.go +++ b/core/txpool/legacypool/legacypool2_test.go @@ -85,7 +85,7 @@ func TestTransactionFutureAttack(t *testing.T) { config := testTxPoolConfig config.GlobalQueue = 100 config.GlobalSlots = 100 - pool := New(config, blockchain) + pool := New(config, blockchain, true) pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() fillPool(t, pool) @@ -119,7 +119,7 @@ func TestTransactionFuture1559(t *testing.T) { // Create the pool to test the pricing enforcement with statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed)) - pool := New(testTxPoolConfig, blockchain) + pool := New(testTxPoolConfig, blockchain, true) pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() @@ -152,7 +152,7 @@ func TestTransactionZAttack(t *testing.T) { // Create the pool to test the pricing enforcement with statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed)) - pool := New(testTxPoolConfig, blockchain) + pool := New(testTxPoolConfig, blockchain, true) pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() // Create a number of test accounts, fund them and make transactions @@ -223,7 +223,7 @@ func BenchmarkFutureAttack(b *testing.B) { config := testTxPoolConfig config.GlobalQueue = 100 config.GlobalSlots = 100 - pool := New(config, blockchain) + pool := New(config, blockchain, true) pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() fillPool(b, pool) diff --git a/core/txpool/legacypool/legacypool_test.go b/core/txpool/legacypool/legacypool_test.go index aa9fe6c92..c3bf450a9 100644 --- a/core/txpool/legacypool/legacypool_test.go +++ b/core/txpool/legacypool/legacypool_test.go @@ -168,7 +168,7 @@ func setupPoolWithConfig(config *params.ChainConfig) (*LegacyPool, *ecdsa.Privat blockchain := 
newTestBlockChain(config, 10000000, statedb, new(event.Feed)) key, _ := crypto.GenerateKey() - pool := New(testTxPoolConfig, blockchain) + pool := New(testTxPoolConfig, blockchain, true) if err := pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()); err != nil { panic(err) } @@ -284,7 +284,7 @@ func TestStateChangeDuringReset(t *testing.T) { tx0 := transaction(0, 100000, key) tx1 := transaction(1, 100000, key) - pool := New(testTxPoolConfig, blockchain) + pool := New(testTxPoolConfig, blockchain, true) pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() @@ -756,7 +756,7 @@ func TestPostponing(t *testing.T) { statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) - pool := New(testTxPoolConfig, blockchain) + pool := New(testTxPoolConfig, blockchain, true) pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() @@ -943,7 +943,7 @@ func testQueueGlobalLimiting(t *testing.T, nolocals bool) { config.NoLocals = nolocals config.GlobalQueue = config.AccountQueue*3 - 1 // reduce the queue limits to shorten test time (-1 to make it non divisible) - pool := New(config, blockchain) + pool := New(config, blockchain, true) pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() @@ -1037,7 +1037,7 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) { config.Lifetime = time.Second config.NoLocals = nolocals - pool := New(config, blockchain) + pool := New(config, blockchain, true) pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() @@ -1189,7 +1189,7 @@ func TestPendingGlobalLimiting(t *testing.T) { config := testTxPoolConfig config.GlobalSlots = config.AccountSlots * 10 - pool := New(config, blockchain) + pool := New(config, blockchain, true) pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() @@ -1293,7 +1293,7 @@ func TestCapClearsFromAll(t *testing.T) { config.AccountQueue = 2 config.GlobalSlots = 8 - pool := New(config, blockchain) + pool := New(config, blockchain, true) pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() @@ -1326,7 +1326,7 @@ func TestPendingMinimumAllowance(t *testing.T) { config := testTxPoolConfig config.GlobalSlots = 1 - pool := New(config, blockchain) + pool := New(config, blockchain, true) pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() @@ -1372,7 +1372,7 @@ func TestRepricing(t *testing.T) { statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) - pool := New(testTxPoolConfig, blockchain) + pool := New(testTxPoolConfig, blockchain, true) pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() @@ -1494,7 +1494,7 @@ func TestMinGasPriceEnforced(t *testing.T) { txPoolConfig := DefaultConfig txPoolConfig.NoLocals = true - pool := New(txPoolConfig, blockchain) + pool := New(txPoolConfig, blockchain, true) pool.Init(txPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() @@ -1665,7 +1665,7 @@ func TestRepricingKeepsLocals(t *testing.T) { statedb, _ := 
state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed)) - pool := New(testTxPoolConfig, blockchain) + pool := New(testTxPoolConfig, blockchain, true) pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() @@ -1743,7 +1743,7 @@ func TestUnderpricing(t *testing.T) { config.GlobalSlots = 2 config.GlobalQueue = 2 - pool := New(config, blockchain) + pool := New(config, blockchain, true) pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() @@ -1858,7 +1858,7 @@ func TestStableUnderpricing(t *testing.T) { config.GlobalSlots = 128 config.GlobalQueue = 0 - pool := New(config, blockchain) + pool := New(config, blockchain, true) pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() @@ -2087,7 +2087,7 @@ func TestDeduplication(t *testing.T) { statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) - pool := New(testTxPoolConfig, blockchain) + pool := New(testTxPoolConfig, blockchain, true) pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() @@ -2154,7 +2154,7 @@ func TestReplacement(t *testing.T) { statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) - pool := New(testTxPoolConfig, blockchain) + pool := New(testTxPoolConfig, blockchain, true) pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() @@ -2366,7 +2366,7 @@ func testJournaling(t *testing.T, nolocals bool) { config.Journal = journal config.Rejournal = time.Second - pool := New(config, blockchain) + pool := New(config, blockchain, true) pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) // Create two test accounts to ensure remotes expire but locals do not @@ -2404,7 +2404,7 @@ func testJournaling(t *testing.T, nolocals bool) { statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1) blockchain = newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) - pool = New(config, blockchain) + pool = New(config, blockchain, true) pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) pending, queued = pool.Stats() @@ -2436,7 +2436,7 @@ func testJournaling(t *testing.T, nolocals bool) { statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1) blockchain = newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) - pool = New(config, blockchain) + pool = New(config, blockchain, true) pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) // tx mempool is cleared out completely after a reset @@ -2474,7 +2474,7 @@ func TestStatusCheck(t *testing.T) { statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) - pool := New(testTxPoolConfig, blockchain) + pool := New(testTxPoolConfig, blockchain, true) pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) defer pool.Close() diff --git a/core/txpool/txpool.go b/core/txpool/txpool.go index 883af5635..432433a96 
100644 --- a/core/txpool/txpool.go +++ b/core/txpool/txpool.go @@ -78,22 +78,25 @@ type TxPool struct { term chan struct{} // Termination channel to detect a closed pool sync chan chan error // Testing / simulator channel to block until internal reset is done + + auctioneerEnabled bool } // New creates a new transaction pool to gather, sort and filter inbound // transactions from the network. -func New(gasTip uint64, chain BlockChain, subpools []SubPool) (*TxPool, error) { +func New(gasTip uint64, chain BlockChain, subpools []SubPool, auctioneerEnabled bool) (*TxPool, error) { // Retrieve the current head so that all subpools and this main coordinator // pool will have the same starting state, even if the chain moves forward // during initialization. head := chain.CurrentBlock() pool := &TxPool{ - subpools: subpools, - reservations: make(map[common.Address]SubPool), - quit: make(chan chan error), - term: make(chan struct{}), - sync: make(chan chan error), + subpools: subpools, + reservations: make(map[common.Address]SubPool), + quit: make(chan chan error), + term: make(chan struct{}), + sync: make(chan chan error), + auctioneerEnabled: auctioneerEnabled, } for i, subpool := range subpools { if err := subpool.Init(gasTip, head, pool.reserver(i, subpool)); err != nil { @@ -192,6 +195,12 @@ func (p *TxPool) loop(head *types.Header, chain BlockChain) { ) defer newOptimisticHeadSub.Unsubscribe() + var ( + newHeadCh = make(chan core.ChainHeadEvent) + newHeadSub = chain.SubscribeChainHeadEvent(newHeadCh) + ) + defer newHeadSub.Unsubscribe() + // Track the previous and current head to feed to an idle reset var ( oldHead = head @@ -245,8 +254,15 @@ func (p *TxPool) loop(head *types.Header, chain BlockChain) { // Wait for the next chain head event or a previous reset finish select { case event := <-newOptimisticHeadCh: - // Chain moved forward, store the head for later consumption - newHead = event.Block.Header() + if p.auctioneerEnabled { + // Chain moved forward, store the head for later consumption + newHead = event.Block.Header() + } + case event := <-newHeadCh: + if !p.auctioneerEnabled { + // Chain moved forward, store the head for later consumption + newHead = event.Block.Header() + } case head := <-resetDone: // Previous reset finished, update the old head and allow a new reset diff --git a/eth/backend.go b/eth/backend.go index bea001c68..b24c4bf46 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -235,9 +235,9 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) { if config.TxPool.Journal != "" { config.TxPool.Journal = stack.ResolvePath(config.TxPool.Journal) } - legacyPool := legacypool.New(config.TxPool, eth.blockchain) + legacyPool := legacypool.New(config.TxPool, eth.blockchain, stack.AuctioneerEnabled()) - eth.txPool, err = txpool.New(config.TxPool.PriceLimit, eth.blockchain, []txpool.SubPool{legacyPool, blobPool}) + eth.txPool, err = txpool.New(config.TxPool.PriceLimit, eth.blockchain, []txpool.SubPool{legacyPool, blobPool}, stack.AuctioneerEnabled()) if err != nil { return nil, err } diff --git a/eth/protocols/eth/handler_test.go b/eth/protocols/eth/handler_test.go index 934dadc9a..84cac22f8 100644 --- a/eth/protocols/eth/handler_test.go +++ b/eth/protocols/eth/handler_test.go @@ -116,8 +116,8 @@ func newTestBackendWithGenerator(blocks int, shanghai bool, generator func(int, txconfig := legacypool.DefaultConfig txconfig.Journal = "" // Don't litter the disk with test journals - pool := legacypool.New(txconfig, chain) - txpool, _ := 
txpool.New(txconfig.PriceLimit, chain, []txpool.SubPool{pool}) + pool := legacypool.New(txconfig, chain, true) + txpool, _ := txpool.New(txconfig.PriceLimit, chain, []txpool.SubPool{pool}, true) return &testBackend{ db: db, diff --git a/grpc/shared/test_utils.go b/grpc/shared/test_utils.go index 82033ea5c..0c95927a7 100644 --- a/grpc/shared/test_utils.go +++ b/grpc/shared/test_utils.go @@ -119,7 +119,9 @@ func GenerateMergeChain(n int, merged bool) (*core.Genesis, []*types.Block, stri // startEthService creates a full node instance for testing. func StartEthService(t *testing.T, genesis *core.Genesis) *eth.Ethereum { - n, err := node.New(&node.Config{}) + n, err := node.New(&node.Config{ + EnableAuctioneer: true, + }) require.Nil(t, err, "can't create node") mcfg := miner.DefaultConfig mcfg.PendingFeeRecipient = TestAddr diff --git a/miner/miner_test.go b/miner/miner_test.go index 3dc39f175..7cd74cb70 100644 --- a/miner/miner_test.go +++ b/miner/miner_test.go @@ -59,11 +59,12 @@ func (m *mockBackend) TxPool() *txpool.TxPool { } type testBlockChain struct { - root common.Hash - config *params.ChainConfig - statedb *state.StateDB - gasLimit uint64 - chainHeadFeed *event.Feed + root common.Hash + config *params.ChainConfig + statedb *state.StateDB + gasLimit uint64 + chainHeadFeed *event.Feed + chainOptimisticHeadFeed *event.Feed } func (bc *testBlockChain) Config() *params.ChainConfig { @@ -94,7 +95,7 @@ func (bc *testBlockChain) SubscribeChainHeadEvent(ch chan<- core.ChainHeadEvent) } func (bc *testBlockChain) SubscribeChainOptimisticHeadEvent(ch chan<- core.ChainOptimisticHeadEvent) event.Subscription { - return bc.chainHeadFeed.Subscribe(ch) + return bc.chainOptimisticHeadFeed.Subscribe(ch) } func TestBuildPendingBlocks(t *testing.T) { @@ -161,10 +162,10 @@ func createMiner(t *testing.T) *Miner { t.Fatalf("can't create new chain %v", err) } statedb, _ := state.New(bc.Genesis().Root(), bc.StateCache(), nil) - blockchain := &testBlockChain{bc.Genesis().Root(), chainConfig, statedb, 10000000, new(event.Feed)} + blockchain := &testBlockChain{bc.Genesis().Root(), chainConfig, statedb, 10000000, new(event.Feed), new(event.Feed)} - pool := legacypool.New(testTxPoolConfig, blockchain) - txpool, _ := txpool.New(testTxPoolConfig.PriceLimit, blockchain, []txpool.SubPool{pool}) + pool := legacypool.New(testTxPoolConfig, blockchain, true) + txpool, _ := txpool.New(testTxPoolConfig.PriceLimit, blockchain, []txpool.SubPool{pool}, true) // Create Miner backend := NewMockBackend(bc, txpool) diff --git a/miner/payload_building_test.go b/miner/payload_building_test.go index 3ba7b6ccc..8e7f46e3a 100644 --- a/miner/payload_building_test.go +++ b/miner/payload_building_test.go @@ -124,8 +124,8 @@ func newTestWorkerBackend(t *testing.T, chainConfig *params.ChainConfig, engine if err != nil { t.Fatalf("core.NewBlockChain failed: %v", err) } - pool := legacypool.New(testTxPoolConfig, chain) - txpool, _ := txpool.New(testTxPoolConfig.PriceLimit, chain, []txpool.SubPool{pool}) + pool := legacypool.New(testTxPoolConfig, chain, true) + txpool, _ := txpool.New(testTxPoolConfig.PriceLimit, chain, []txpool.SubPool{pool}, true) return &testWorkerBackend{ db: db, diff --git a/node/config.go b/node/config.go index 9f83540b2..11a54c1e6 100644 --- a/node/config.go +++ b/node/config.go @@ -217,6 +217,8 @@ type Config struct { EnablePersonal bool `toml:"-"` DBEngine string `toml:",omitempty"` + + EnableAuctioneer bool `toml:",omitempty"` } // IPCEndpoint resolves an IPC endpoint based on a configured value, taking into diff 
--git a/node/grpcstack.go b/node/grpcstack.go index 2f8d7b091..b3a34c2ca 100644 --- a/node/grpcstack.go +++ b/node/grpcstack.go @@ -20,6 +20,8 @@ type GRPCServerHandler struct { executionServiceServerV1a2 *astriaGrpc.ExecutionServiceServer optimisticExecServ *optimisticGrpc.OptimisticExecutionServiceServer streamBundleServ *optimisticGrpc.BundleServiceServer + + enableAuctioneer bool } // NewServer creates a new gRPC server. @@ -36,11 +38,14 @@ func NewGRPCServerHandler(node *Node, execServ astriaGrpc.ExecutionServiceServer executionServiceServerV1a2: &execServ, optimisticExecServ: &optimisticExecServ, streamBundleServ: &streamBundleServ, + enableAuctioneer: cfg.EnableAuctioneer, } astriaGrpc.RegisterExecutionServiceServer(execServer, execServ) - optimisticGrpc.RegisterOptimisticExecutionServiceServer(execServer, optimisticExecServ) - optimisticGrpc.RegisterBundleServiceServer(execServer, streamBundleServ) + if cfg.EnableAuctioneer { + optimisticGrpc.RegisterOptimisticExecutionServiceServer(execServer, optimisticExecServ) + optimisticGrpc.RegisterBundleServiceServer(execServer, streamBundleServ) + } node.RegisterGRPCServer(serverHandler) return nil @@ -62,6 +67,7 @@ func (handler *GRPCServerHandler) Start() error { } go handler.execServer.Serve(tcpLis) + log.Info("gRPC server started", "endpoint", handler.endpoint) return nil } @@ -72,6 +78,7 @@ func (handler *GRPCServerHandler) Stop() error { defer handler.mu.Unlock() handler.execServer.GracefulStop() + log.Info("gRPC server stopped", "endpoint", handler.endpoint) return nil } diff --git a/node/node.go b/node/node.go index dc56b3361..0b19df5db 100644 --- a/node/node.go +++ b/node/node.go @@ -69,6 +69,8 @@ type Node struct { // grpc grpcServerHandler *GRPCServerHandler // Stores information about the grpc server + enableAuctioneer bool + databases map[*closeTrackingDB]struct{} // All open databases } @@ -159,6 +161,10 @@ func New(conf *Config) (*Node, error) { node.wsAuth = newHTTPServer(node.log, rpc.DefaultHTTPTimeouts) node.ipc = newIPCServer(node.log, conf.IPCEndpoint()) + if conf.EnableAuctioneer { + node.enableAuctioneer = true + } + return node, nil } @@ -756,6 +762,10 @@ func (n *Node) EventMux() *event.TypeMux { return n.eventmux } +func (n *Node) AuctioneerEnabled() bool { + return n.enableAuctioneer +} + // OpenDatabase opens an existing database with the given name (or creates one if no // previous can be found) from within the node's instance directory. If the node is // ephemeral, a memory database is returned. 
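Taken together, the changes above thread a single EnableAuctioneer boolean from the node configuration into the transaction pool, where it decides whether the maintenance loop resets against optimistic-head events or canonical chain-head events. The sketch below is a minimal, self-contained illustration of that gating only; headEvent and selectResetHead are simplified stand-ins introduced here for clarity, not types or functions from the patches.

package main

import "fmt"

// headEvent is a simplified stand-in for core.ChainHeadEvent /
// core.ChainOptimisticHeadEvent; only a block number is carried here.
type headEvent struct {
	optimistic bool
	number     uint64
}

// selectResetHead mirrors the gating added to TxPool.loop: optimistic-head
// events drive resets only when the auctioneer is enabled, canonical-head
// events only when it is disabled. The second return value reports whether
// the event would be consumed at all.
func selectResetHead(auctioneerEnabled bool, ev headEvent) (uint64, bool) {
	if ev.optimistic == auctioneerEnabled {
		return ev.number, true
	}
	return 0, false
}

func main() {
	events := []headEvent{
		{optimistic: true, number: 11},
		{optimistic: false, number: 10},
	}
	for _, enabled := range []bool{true, false} {
		for _, ev := range events {
			if n, ok := selectResetHead(enabled, ev); ok {
				fmt.Printf("auctioneer=%v: reset against head %d (optimistic=%v)\n", enabled, n, ev.optimistic)
			}
		}
	}
}

Note that in the actual diff both subscriptions stay open regardless of the flag; only which event is allowed to update newHead changes, so the loop structure is identical in both modes.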
From e48458f9398fd50afce4ca808c48da47513f1c6e Mon Sep 17 00:00:00 2001
From: Bharath
Date: Tue, 29 Oct 2024 16:11:23 +0530
Subject: [PATCH 52/79] add flags

---
 cmd/geth/main.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/cmd/geth/main.go b/cmd/geth/main.go
index 162655190..a417a9753 100644
--- a/cmd/geth/main.go
+++ b/cmd/geth/main.go
@@ -123,6 +123,7 @@ var (
 		utils.MinerRecommitIntervalFlag,
 		utils.MinerPendingFeeRecipientFlag,
 		utils.MinerNewPayloadTimeoutFlag, // deprecated
+		utils.AuctioneerEnabledFlag,
 		utils.NATFlag,
 		utils.NoDiscoverFlag,
 		utils.DiscoveryV4Flag,

From 591e8277762a20d4c65c4999d94d4ad4429f8f3f Mon Sep 17 00:00:00 2001
From: Bharath
Date: Tue, 12 Nov 2024 17:16:54 +0530
Subject: [PATCH 53/79] save

---
 cmd/utils/flags.go | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go
index 6a01d0961..15fd5e580 100644
--- a/cmd/utils/flags.go
+++ b/cmd/utils/flags.go
@@ -1446,6 +1446,12 @@ func SetNodeConfig(ctx *cli.Context, cfg *node.Config) {
 	SetDataDir(ctx, cfg)
 	setSmartCard(ctx, cfg)
 
+	if ctx.Bool(AuctioneerEnabledFlag.Name) {
+		cfg.EnableAuctioneer = true
+	} else {
+		cfg.EnableAuctioneer = false
+	}
+
 	if ctx.IsSet(JWTSecretFlag.Name) {
 		cfg.JWTSecret = ctx.String(JWTSecretFlag.Name)
 	}

From 7444772625b0cc2dd7363cba4a29daf8f73233d1 Mon Sep 17 00:00:00 2001
From: Bharath
Date: Tue, 12 Nov 2024 22:10:28 +0530
Subject: [PATCH 54/79] maintain a copy of legacy pool tests to test the cases when auctioneer is not enabled

---
 .../legacypool_no_auctioneer_test.go      | 2534 +++++++++++++++++
 core/txpool/legacypool/legacypool_test.go |  149 +-
 2 files changed, 2563 insertions(+), 120 deletions(-)
 create mode 100644 core/txpool/legacypool/legacypool_no_auctioneer_test.go

diff --git a/core/txpool/legacypool/legacypool_no_auctioneer_test.go b/core/txpool/legacypool/legacypool_no_auctioneer_test.go
new file mode 100644
index 000000000..1f70b5334
--- /dev/null
+++ b/core/txpool/legacypool/legacypool_no_auctioneer_test.go
@@ -0,0 +1,2534 @@
+// Copyright 2015 The go-ethereum Authors
+// This file is part of the go-ethereum library.
+//
+// The go-ethereum library is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Lesser General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// The go-ethereum library is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Lesser General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public License
+// along with the go-ethereum library. If not, see .
+ +package legacypool + +import ( + "crypto/ecdsa" + "errors" + "math/big" + "math/rand" + "os" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" + "github.com/ethereum/go-ethereum/core/tracing" + "github.com/ethereum/go-ethereum/core/txpool" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/event" + "github.com/ethereum/go-ethereum/params" + "github.com/holiman/uint256" +) + +func init() { + testTxPoolConfig = DefaultConfig + testTxPoolConfig.Journal = "" + + cpy := *params.TestChainConfig + eip1559Config = &cpy + eip1559Config.BerlinBlock = common.Big0 + eip1559Config.LondonBlock = common.Big0 +} + +// This test simulates a scenario where a new block is imported during a +// state reset and tests whether the pending state is in sync with the +// block head event that initiated the resetState(). +func TestStateChangeDuringResetNoAuctioneer(t *testing.T) { + t.Parallel() + + var ( + key, _ = crypto.GenerateKey() + address = crypto.PubkeyToAddress(key.PublicKey) + statedb, _ = state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + trigger = false + ) + + // setup pool with 2 transaction in it + statedb.SetBalance(address, new(uint256.Int).SetUint64(params.Ether), tracing.BalanceChangeUnspecified) + blockchain := &testChain{newTestBlockChain(params.TestChainConfig, 1000000000, statedb, new(event.Feed)), address, &trigger} + + tx0 := transaction(0, 100000, key) + tx1 := transaction(1, 100000, key) + + pool := New(testTxPoolConfig, blockchain, false) + pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() + + nonce := pool.Nonce(address) + if nonce != 0 { + t.Fatalf("Invalid nonce, want 0, got %d", nonce) + } + + pool.addRemotesSync([]*types.Transaction{tx0, tx1}) + + nonce = pool.Nonce(address) + if nonce != 2 { + t.Fatalf("Invalid nonce, want 2, got %d", nonce) + } + + // trigger state change in the background + trigger = true + <-pool.requestReset(nil, nil) + + nonce = pool.Nonce(address) + if nonce != 2 { + t.Fatalf("Invalid nonce, want 2, got %d", nonce) + } +} + +func TestInvalidTransactionsNoAuctioneer(t *testing.T) { + t.Parallel() + + pool, key := setupPool(false) + defer pool.Close() + + tx := transaction(0, 100, key) + from, _ := deriveSender(tx) + + // Intrinsic gas too low + testAddBalance(pool, from, big.NewInt(1)) + if err, want := pool.addRemote(tx), core.ErrIntrinsicGas; !errors.Is(err, want) { + t.Errorf("want %v have %v", want, err) + } + + // Insufficient funds + tx = transaction(0, 100000, key) + if err, want := pool.addRemote(tx), core.ErrInsufficientFunds; !errors.Is(err, want) { + t.Errorf("want %v have %v", want, err) + } + + testSetNonce(pool, from, 1) + testAddBalance(pool, from, big.NewInt(0xffffffffffffff)) + tx = transaction(0, 100000, key) + if err, want := pool.addRemote(tx), core.ErrNonceTooLow; !errors.Is(err, want) { + t.Errorf("want %v have %v", want, err) + } + + tx = transaction(1, 100000, key) + pool.gasTip.Store(uint256.NewInt(1000)) + if err, want := pool.addRemote(tx), txpool.ErrUnderpriced; !errors.Is(err, want) { + t.Errorf("want %v have %v", want, err) + } + if err := pool.addLocal(tx); err != nil { + t.Error("expected", nil, "got", err) + } +} + +func TestQueueNoAuctioneer(t *testing.T) { + t.Parallel() + + pool, key := setupPool(false) + 
defer pool.Close() + + tx := transaction(0, 100, key) + from, _ := deriveSender(tx) + testAddBalance(pool, from, big.NewInt(1000)) + <-pool.requestReset(nil, nil) + + pool.enqueueTx(tx.Hash(), tx, false, true) + <-pool.requestPromoteExecutables(newAccountSet(pool.signer, from)) + if len(pool.pending) != 1 { + t.Error("expected valid txs to be 1 is", len(pool.pending)) + } + + tx = transaction(1, 100, key) + from, _ = deriveSender(tx) + testSetNonce(pool, from, 2) + pool.enqueueTx(tx.Hash(), tx, false, true) + + <-pool.requestPromoteExecutables(newAccountSet(pool.signer, from)) + if _, ok := pool.pending[from].txs.items[tx.Nonce()]; ok { + t.Error("expected transaction to be in tx pool") + } + if len(pool.queue) > 0 { + t.Error("expected transaction queue to be empty. is", len(pool.queue)) + } +} + +func TestQueue2NoAuctioneer(t *testing.T) { + t.Parallel() + + pool, key := setupPool(false) + defer pool.Close() + + tx1 := transaction(0, 100, key) + tx2 := transaction(10, 100, key) + tx3 := transaction(11, 100, key) + from, _ := deriveSender(tx1) + testAddBalance(pool, from, big.NewInt(1000)) + pool.reset(nil, nil) + + pool.enqueueTx(tx1.Hash(), tx1, false, true) + pool.enqueueTx(tx2.Hash(), tx2, false, true) + pool.enqueueTx(tx3.Hash(), tx3, false, true) + + pool.promoteExecutables([]common.Address{from}) + if len(pool.pending) != 1 { + t.Error("expected pending length to be 1, got", len(pool.pending)) + } + if pool.queue[from].Len() != 2 { + t.Error("expected len(queue) == 2, got", pool.queue[from].Len()) + } +} + +func TestNegativeValueNoAuctioneer(t *testing.T) { + t.Parallel() + + pool, key := setupPool(false) + defer pool.Close() + + tx, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(-1), 100, big.NewInt(1), nil), types.HomesteadSigner{}, key) + from, _ := deriveSender(tx) + testAddBalance(pool, from, big.NewInt(1)) + if err := pool.addRemote(tx); err != txpool.ErrNegativeValue { + t.Error("expected", txpool.ErrNegativeValue, "got", err) + } +} + +func TestTipAboveFeeCapNoAuctioneer(t *testing.T) { + t.Parallel() + + pool, key := setupPoolWithConfig(eip1559Config, false) + defer pool.Close() + + tx := dynamicFeeTx(0, 100, big.NewInt(1), big.NewInt(2), key) + + if err := pool.addRemote(tx); err != core.ErrTipAboveFeeCap { + t.Error("expected", core.ErrTipAboveFeeCap, "got", err) + } +} + +func TestVeryHighValuesNoAuctioneer(t *testing.T) { + t.Parallel() + + pool, key := setupPoolWithConfig(eip1559Config, false) + defer pool.Close() + + veryBigNumber := big.NewInt(1) + veryBigNumber.Lsh(veryBigNumber, 300) + + tx := dynamicFeeTx(0, 100, big.NewInt(1), veryBigNumber, key) + if err := pool.addRemote(tx); err != core.ErrTipVeryHigh { + t.Error("expected", core.ErrTipVeryHigh, "got", err) + } + + tx2 := dynamicFeeTx(0, 100, veryBigNumber, big.NewInt(1), key) + if err := pool.addRemote(tx2); err != core.ErrFeeCapVeryHigh { + t.Error("expected", core.ErrFeeCapVeryHigh, "got", err) + } +} + +func TestChainForkNoAuctioneer(t *testing.T) { + t.Parallel() + + pool, key := setupPool(false) + defer pool.Close() + + addr := crypto.PubkeyToAddress(key.PublicKey) + resetState := func() { + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + statedb.AddBalance(addr, uint256.NewInt(100000000000000), tracing.BalanceChangeUnspecified) + + pool.chain = newTestBlockChain(pool.chainconfig, 1000000, statedb, new(event.Feed)) + <-pool.requestReset(nil, nil) + } + resetState() + + tx := transaction(0, 100000, key) + if _, err := pool.add(tx, 
false); err != nil { + t.Error("didn't expect error", err) + } + pool.removeTx(tx.Hash(), true, true) + + // reset the pool's internal state + resetState() + if _, err := pool.add(tx, false); err != nil { + t.Error("didn't expect error", err) + } +} + +func TestRemoveTxSanityNoAuctioneer(t *testing.T) { + t.Parallel() + + pool, key := setupPool(false) + defer pool.Close() + + addr := crypto.PubkeyToAddress(key.PublicKey) + resetState := func() { + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + statedb.AddBalance(addr, uint256.NewInt(100000000000000), tracing.BalanceChangeUnspecified) + + pool.chain = newTestBlockChain(pool.chainconfig, 1000000, statedb, new(event.Feed)) + <-pool.requestReset(nil, nil) + } + resetState() + + tx1 := transaction(0, 100000, key) + tx2 := transaction(1, 100000, key) + tx3 := transaction(2, 100000, key) + + if err := pool.addLocal(tx1); err != nil { + t.Error("didn't expect error", err) + } + if err := pool.addLocal(tx2); err != nil { + t.Error("didn't expect error", err) + } + if err := pool.addLocal(tx3); err != nil { + t.Error("didn't expect error", err) + } + + pendingTxs := pool.pending[addr] + if pendingTxs.Len() != 3 { + t.Error("expected 3 pending transactions, got", pendingTxs.Len()) + } + + if err := validatePoolInternals(pool); err != nil { + t.Errorf("pool internals validation failed: %v", err) + } + + n := pool.removeTx(tx1.Hash(), false, true) + if n != 3 { + t.Error("expected 3 transactions to be removed, got", n) + } + n = pool.removeTx(tx2.Hash(), false, true) + if n != 0 { + t.Error("expected 0 transactions to be removed, got", n) + } + n = pool.removeTx(tx3.Hash(), false, true) + if n != 0 { + t.Error("expected 0 transactions to be removed, got", n) + } + + if len(pool.pending) != 0 { + t.Error("expected 0 pending transactions, got", pendingTxs.Len()) + } + + if err := validatePoolInternals(pool); err != nil { + t.Errorf("pool internals validation failed: %v", err) + } +} + +func TestDoubleNonceNoAuctioneer(t *testing.T) { + t.Parallel() + + pool, key := setupPool(false) + defer pool.Close() + + addr := crypto.PubkeyToAddress(key.PublicKey) + resetState := func() { + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + statedb.AddBalance(addr, uint256.NewInt(100000000000000), tracing.BalanceChangeUnspecified) + + pool.chain = newTestBlockChain(pool.chainconfig, 1000000, statedb, new(event.Feed)) + <-pool.requestReset(nil, nil) + } + resetState() + + signer := types.HomesteadSigner{} + tx1, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(100), 100000, big.NewInt(1), nil), signer, key) + tx2, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(100), 1000000, big.NewInt(2), nil), signer, key) + tx3, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(100), 1000000, big.NewInt(1), nil), signer, key) + + // Add the first two transaction, ensure higher priced stays only + if replace, err := pool.add(tx1, false); err != nil || replace { + t.Errorf("first transaction insert failed (%v) or reported replacement (%v)", err, replace) + } + if replace, err := pool.add(tx2, false); err != nil || !replace { + t.Errorf("second transaction insert failed (%v) or not reported replacement (%v)", err, replace) + } + <-pool.requestPromoteExecutables(newAccountSet(signer, addr)) + if pool.pending[addr].Len() != 1 { + t.Error("expected 1 pending transactions, got", pool.pending[addr].Len()) + } + if tx := 
pool.pending[addr].txs.items[0]; tx.Hash() != tx2.Hash() { + t.Errorf("transaction mismatch: have %x, want %x", tx.Hash(), tx2.Hash()) + } + + // Add the third transaction and ensure it's not saved (smaller price) + pool.add(tx3, false) + <-pool.requestPromoteExecutables(newAccountSet(signer, addr)) + if pool.pending[addr].Len() != 1 { + t.Error("expected 1 pending transactions, got", pool.pending[addr].Len()) + } + if tx := pool.pending[addr].txs.items[0]; tx.Hash() != tx2.Hash() { + t.Errorf("transaction mismatch: have %x, want %x", tx.Hash(), tx2.Hash()) + } + // Ensure the total transaction count is correct + if pool.all.Count() != 1 { + t.Error("expected 1 total transactions, got", pool.all.Count()) + } +} + +func TestMissingNonceNoAuctioneer(t *testing.T) { + t.Parallel() + + pool, key := setupPool(false) + defer pool.Close() + + addr := crypto.PubkeyToAddress(key.PublicKey) + testAddBalance(pool, addr, big.NewInt(100000000000000)) + tx := transaction(1, 100000, key) + if _, err := pool.add(tx, false); err != nil { + t.Error("didn't expect error", err) + } + if len(pool.pending) != 0 { + t.Error("expected 0 pending transactions, got", len(pool.pending)) + } + if pool.queue[addr].Len() != 1 { + t.Error("expected 1 queued transaction, got", pool.queue[addr].Len()) + } + if pool.all.Count() != 1 { + t.Error("expected 1 total transactions, got", pool.all.Count()) + } +} + +func TestNonceRecoveryNoAuctioneer(t *testing.T) { + t.Parallel() + + const n = 10 + pool, key := setupPool(false) + defer pool.Close() + + addr := crypto.PubkeyToAddress(key.PublicKey) + testSetNonce(pool, addr, n) + testAddBalance(pool, addr, big.NewInt(100000000000000)) + <-pool.requestReset(nil, nil) + + tx := transaction(n, 100000, key) + if err := pool.addRemote(tx); err != nil { + t.Error(err) + } + // simulate some weird re-order of transactions and missing nonce(s) + testSetNonce(pool, addr, n-1) + <-pool.requestReset(nil, nil) + if fn := pool.Nonce(addr); fn != n-1 { + t.Errorf("expected nonce to be %d, got %d", n-1, fn) + } +} + +// Tests that if an account runs out of funds, any pending and queued transactions +// are dropped. 
+func TestDroppingNoAuctioneer(t *testing.T) { + t.Parallel() + + // Create a test account and fund it + pool, key := setupPool(false) + defer pool.Close() + + account := crypto.PubkeyToAddress(key.PublicKey) + testAddBalance(pool, account, big.NewInt(1000)) + + // Add some pending and some queued transactions + var ( + tx0 = transaction(0, 100, key) + tx1 = transaction(1, 200, key) + tx2 = transaction(2, 300, key) + tx10 = transaction(10, 100, key) + tx11 = transaction(11, 200, key) + tx12 = transaction(12, 300, key) + ) + pool.all.Add(tx0, false) + pool.priced.Put(tx0, false) + pool.promoteTx(account, tx0.Hash(), tx0) + + pool.all.Add(tx1, false) + pool.priced.Put(tx1, false) + pool.promoteTx(account, tx1.Hash(), tx1) + + pool.all.Add(tx2, false) + pool.priced.Put(tx2, false) + pool.promoteTx(account, tx2.Hash(), tx2) + + pool.enqueueTx(tx10.Hash(), tx10, false, true) + pool.enqueueTx(tx11.Hash(), tx11, false, true) + pool.enqueueTx(tx12.Hash(), tx12, false, true) + + // Check that pre and post validations leave the pool as is + if pool.pending[account].Len() != 3 { + t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 3) + } + if pool.queue[account].Len() != 3 { + t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 3) + } + if pool.all.Count() != 6 { + t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 6) + } + <-pool.requestReset(nil, nil) + if pool.pending[account].Len() != 3 { + t.Errorf("pending transaction mismatch: have %d, want %d", pool.pending[account].Len(), 3) + } + if pool.queue[account].Len() != 3 { + t.Errorf("queued transaction mismatch: have %d, want %d", pool.queue[account].Len(), 3) + } + if pool.all.Count() != 6 { + t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 6) + } + // Reduce the balance of the account, and check that invalidated transactions are dropped + testAddBalance(pool, account, big.NewInt(-650)) + <-pool.requestReset(nil, nil) + + if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok { + t.Errorf("funded pending transaction missing: %v", tx0) + } + if _, ok := pool.pending[account].txs.items[tx1.Nonce()]; !ok { + t.Errorf("funded pending transaction missing: %v", tx0) + } + if _, ok := pool.pending[account].txs.items[tx2.Nonce()]; ok { + t.Errorf("out-of-fund pending transaction present: %v", tx1) + } + if _, ok := pool.queue[account].txs.items[tx10.Nonce()]; !ok { + t.Errorf("funded queued transaction missing: %v", tx10) + } + if _, ok := pool.queue[account].txs.items[tx11.Nonce()]; !ok { + t.Errorf("funded queued transaction missing: %v", tx10) + } + if _, ok := pool.queue[account].txs.items[tx12.Nonce()]; ok { + t.Errorf("out-of-fund queued transaction present: %v", tx11) + } + if pool.all.Count() != 4 { + t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 4) + } + // Reduce the block gas limit, check that invalidated transactions are dropped + pool.chain.(*testBlockChain).gasLimit.Store(100) + <-pool.requestReset(nil, nil) + + if _, ok := pool.pending[account].txs.items[tx0.Nonce()]; !ok { + t.Errorf("funded pending transaction missing: %v", tx0) + } + if _, ok := pool.pending[account].txs.items[tx1.Nonce()]; ok { + t.Errorf("over-gased pending transaction present: %v", tx1) + } + if _, ok := pool.queue[account].txs.items[tx10.Nonce()]; !ok { + t.Errorf("funded queued transaction missing: %v", tx10) + } + if _, ok := pool.queue[account].txs.items[tx11.Nonce()]; ok { + t.Errorf("over-gased queued 
transaction present: %v", tx11) + } + if pool.all.Count() != 2 { + t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), 2) + } +} + +// Tests that if a transaction is dropped from the current pending pool (e.g. out +// of fund), all consecutive (still valid, but not executable) transactions are +// postponed back into the future queue to prevent broadcasting them. +func TestPostponingNoAuctioneer(t *testing.T) { + t.Parallel() + + // Create the pool to test the postponing with + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + + pool := New(testTxPoolConfig, blockchain, false) + pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() + + // Create two test accounts to produce different gap profiles with + keys := make([]*ecdsa.PrivateKey, 2) + accs := make([]common.Address, len(keys)) + + for i := 0; i < len(keys); i++ { + keys[i], _ = crypto.GenerateKey() + accs[i] = crypto.PubkeyToAddress(keys[i].PublicKey) + + testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(50100)) + } + // Add a batch consecutive pending transactions for validation + txs := []*types.Transaction{} + for i, key := range keys { + for j := 0; j < 100; j++ { + var tx *types.Transaction + if (i+j)%2 == 0 { + tx = transaction(uint64(j), 25000, key) + } else { + tx = transaction(uint64(j), 50000, key) + } + txs = append(txs, tx) + } + } + for i, err := range pool.addRemotesSync(txs) { + if err != nil { + t.Fatalf("tx %d: failed to add transactions: %v", i, err) + } + } + // Check that pre and post validations leave the pool as is + if pending := pool.pending[accs[0]].Len() + pool.pending[accs[1]].Len(); pending != len(txs) { + t.Errorf("pending transaction mismatch: have %d, want %d", pending, len(txs)) + } + if len(pool.queue) != 0 { + t.Errorf("queued accounts mismatch: have %d, want %d", len(pool.queue), 0) + } + if pool.all.Count() != len(txs) { + t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs)) + } + <-pool.requestReset(nil, nil) + if pending := pool.pending[accs[0]].Len() + pool.pending[accs[1]].Len(); pending != len(txs) { + t.Errorf("pending transaction mismatch: have %d, want %d", pending, len(txs)) + } + if len(pool.queue) != 0 { + t.Errorf("queued accounts mismatch: have %d, want %d", len(pool.queue), 0) + } + if pool.all.Count() != len(txs) { + t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs)) + } + // Reduce the balance of the account, and check that transactions are reorganised + for _, addr := range accs { + testAddBalance(pool, addr, big.NewInt(-1)) + } + <-pool.requestReset(nil, nil) + + // The first account's first transaction remains valid, check that subsequent + // ones are either filtered out, or queued up for later. 
+ if _, ok := pool.pending[accs[0]].txs.items[txs[0].Nonce()]; !ok { + t.Errorf("tx %d: valid and funded transaction missing from pending pool: %v", 0, txs[0]) + } + if _, ok := pool.queue[accs[0]].txs.items[txs[0].Nonce()]; ok { + t.Errorf("tx %d: valid and funded transaction present in future queue: %v", 0, txs[0]) + } + for i, tx := range txs[1:100] { + if i%2 == 1 { + if _, ok := pool.pending[accs[0]].txs.items[tx.Nonce()]; ok { + t.Errorf("tx %d: valid but future transaction present in pending pool: %v", i+1, tx) + } + if _, ok := pool.queue[accs[0]].txs.items[tx.Nonce()]; !ok { + t.Errorf("tx %d: valid but future transaction missing from future queue: %v", i+1, tx) + } + } else { + if _, ok := pool.pending[accs[0]].txs.items[tx.Nonce()]; ok { + t.Errorf("tx %d: out-of-fund transaction present in pending pool: %v", i+1, tx) + } + if _, ok := pool.queue[accs[0]].txs.items[tx.Nonce()]; ok { + t.Errorf("tx %d: out-of-fund transaction present in future queue: %v", i+1, tx) + } + } + } + // The second account's first transaction got invalid, check that all transactions + // are either filtered out, or queued up for later. + if pool.pending[accs[1]] != nil { + t.Errorf("invalidated account still has pending transactions") + } + for i, tx := range txs[100:] { + if i%2 == 1 { + if _, ok := pool.queue[accs[1]].txs.items[tx.Nonce()]; !ok { + t.Errorf("tx %d: valid but future transaction missing from future queue: %v", 100+i, tx) + } + } else { + if _, ok := pool.queue[accs[1]].txs.items[tx.Nonce()]; ok { + t.Errorf("tx %d: out-of-fund transaction present in future queue: %v", 100+i, tx) + } + } + } + if pool.all.Count() != len(txs)/2 { + t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), len(txs)/2) + } +} + +// Tests that if the transaction pool has both executable and non-executable +// transactions from an origin account, filling the nonce gap moves all queued +// ones into the pending pool. 
+func TestGapFillingNoAuctioneer(t *testing.T) { + t.Parallel() + + // Create a test account and fund it + pool, key := setupPool(false) + defer pool.Close() + + account := crypto.PubkeyToAddress(key.PublicKey) + testAddBalance(pool, account, big.NewInt(1000000)) + + // Keep track of transaction events to ensure all executables get announced + events := make(chan core.NewTxsEvent, testTxPoolConfig.AccountQueue+5) + sub := pool.txFeed.Subscribe(events) + defer sub.Unsubscribe() + + // Create a pending and a queued transaction with a nonce-gap in between + pool.addRemotesSync([]*types.Transaction{ + transaction(0, 100000, key), + transaction(2, 100000, key), + }) + pending, queued := pool.Stats() + if pending != 1 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 1) + } + if queued != 1 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) + } + if err := validateEvents(events, 1); err != nil { + t.Fatalf("original event firing failed: %v", err) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + // Fill the nonce gap and ensure all transactions become pending + if err := pool.addRemoteSync(transaction(1, 100000, key)); err != nil { + t.Fatalf("failed to add gapped transaction: %v", err) + } + pending, queued = pool.Stats() + if pending != 3 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3) + } + if queued != 0 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) + } + if err := validateEvents(events, 2); err != nil { + t.Fatalf("gap-filling event firing failed: %v", err) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } +} + +// Tests that if the transaction count belonging to a single account goes above +// some threshold, the higher transactions are dropped to prevent DOS attacks. +func TestQueueAccountLimitingNoAuctioneer(t *testing.T) { + t.Parallel() + + // Create a test account and fund it + pool, key := setupPool(false) + defer pool.Close() + + account := crypto.PubkeyToAddress(key.PublicKey) + testAddBalance(pool, account, big.NewInt(1000000)) + + // Keep queuing up transactions and make sure all above a limit are dropped + for i := uint64(1); i <= testTxPoolConfig.AccountQueue+5; i++ { + if err := pool.addRemoteSync(transaction(i, 100000, key)); err != nil { + t.Fatalf("tx %d: failed to add transaction: %v", i, err) + } + if len(pool.pending) != 0 { + t.Errorf("tx %d: pending pool size mismatch: have %d, want %d", i, len(pool.pending), 0) + } + if i <= testTxPoolConfig.AccountQueue { + if pool.queue[account].Len() != int(i) { + t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, pool.queue[account].Len(), i) + } + } else { + if pool.queue[account].Len() != int(testTxPoolConfig.AccountQueue) { + t.Errorf("tx %d: queue limit mismatch: have %d, want %d", i, pool.queue[account].Len(), testTxPoolConfig.AccountQueue) + } + } + } + if pool.all.Count() != int(testTxPoolConfig.AccountQueue) { + t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), testTxPoolConfig.AccountQueue) + } +} + +// Tests that if the transaction count belonging to multiple accounts go above +// some threshold, the higher transactions are dropped to prevent DOS attacks. +// +// This logic should not hold for local transactions, unless the local tracking +// mechanism is disabled. 
+func TestQueueGlobalLimitingNoAuctioneer(t *testing.T) { + testQueueGlobalLimiting(t, false) +} +func TestQueueGlobalLimitingNoLocalsNoAuctioneer(t *testing.T) { + testQueueGlobalLimiting(t, true) +} + +func testQueueGlobalLimiting(t *testing.T, nolocals bool) { + t.Parallel() + + // Create the pool to test the limit enforcement with + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + + config := testTxPoolConfig + config.NoLocals = nolocals + config.GlobalQueue = config.AccountQueue*3 - 1 // reduce the queue limits to shorten test time (-1 to make it non divisible) + + pool := New(config, blockchain, false) + pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() + + // Create a number of test accounts and fund them (last one will be the local) + keys := make([]*ecdsa.PrivateKey, 5) + for i := 0; i < len(keys); i++ { + keys[i], _ = crypto.GenerateKey() + testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000)) + } + local := keys[len(keys)-1] + + // Generate and queue a batch of transactions + nonces := make(map[common.Address]uint64) + + txs := make(types.Transactions, 0, 3*config.GlobalQueue) + for len(txs) < cap(txs) { + key := keys[rand.Intn(len(keys)-1)] // skip adding transactions with the local account + addr := crypto.PubkeyToAddress(key.PublicKey) + + txs = append(txs, transaction(nonces[addr]+1, 100000, key)) + nonces[addr]++ + } + // Import the batch and verify that limits have been enforced + pool.addRemotesSync(txs) + + queued := 0 + for addr, list := range pool.queue { + if list.Len() > int(config.AccountQueue) { + t.Errorf("addr %x: queued accounts overflown allowance: %d > %d", addr, list.Len(), config.AccountQueue) + } + queued += list.Len() + } + if queued > int(config.GlobalQueue) { + t.Fatalf("total transactions overflow allowance: %d > %d", queued, config.GlobalQueue) + } + // Generate a batch of transactions from the local account and import them + txs = txs[:0] + for i := uint64(0); i < 3*config.GlobalQueue; i++ { + txs = append(txs, transaction(i+1, 100000, local)) + } + pool.addLocals(txs) + + // If locals are disabled, the previous eviction algorithm should apply here too + if nolocals { + queued := 0 + for addr, list := range pool.queue { + if list.Len() > int(config.AccountQueue) { + t.Errorf("addr %x: queued accounts overflown allowance: %d > %d", addr, list.Len(), config.AccountQueue) + } + queued += list.Len() + } + if queued > int(config.GlobalQueue) { + t.Fatalf("total transactions overflow allowance: %d > %d", queued, config.GlobalQueue) + } + } else { + // Local exemptions are enabled, make sure the local account owned the queue + if len(pool.queue) != 1 { + t.Errorf("multiple accounts in queue: have %v, want %v", len(pool.queue), 1) + } + // Also ensure no local transactions are ever dropped, even if above global limits + if queued := pool.queue[crypto.PubkeyToAddress(local.PublicKey)].Len(); uint64(queued) != 3*config.GlobalQueue { + t.Fatalf("local account queued transaction count mismatch: have %v, want %v", queued, 3*config.GlobalQueue) + } + } +} + +// Tests that if an account remains idle for a prolonged amount of time, any +// non-executable transactions queued up are dropped to prevent wasting resources +// on shuffling them around. 
+// +// This logic should not hold for local transactions, unless the local tracking +// mechanism is disabled. +func TestQueueTimeLimitingNoAuctioneer(t *testing.T) { + testQueueTimeLimitingNoAuctioneer(t, false) +} +func TestQueueTimeLimitingNoLocalsNoAuctioneer(t *testing.T) { + testQueueTimeLimitingNoAuctioneer(t, true) +} + +func testQueueTimeLimitingNoAuctioneer(t *testing.T, nolocals bool) { + // Reduce the eviction interval to a testable amount + defer func(old time.Duration) { evictionInterval = old }(evictionInterval) + evictionInterval = time.Millisecond * 100 + + // Create the pool to test the non-expiration enforcement + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + + config := testTxPoolConfig + config.Lifetime = time.Second + config.NoLocals = nolocals + + pool := New(config, blockchain, false) + pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() + + // Create two test accounts to ensure remotes expire but locals do not + local, _ := crypto.GenerateKey() + remote, _ := crypto.GenerateKey() + + testAddBalance(pool, crypto.PubkeyToAddress(local.PublicKey), big.NewInt(1000000000)) + testAddBalance(pool, crypto.PubkeyToAddress(remote.PublicKey), big.NewInt(1000000000)) + + // Add the two transactions and ensure they both are queued up + if err := pool.addLocal(pricedTransaction(1, 100000, big.NewInt(1), local)); err != nil { + t.Fatalf("failed to add local transaction: %v", err) + } + if err := pool.addRemote(pricedTransaction(1, 100000, big.NewInt(1), remote)); err != nil { + t.Fatalf("failed to add remote transaction: %v", err) + } + pending, queued := pool.Stats() + if pending != 0 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0) + } + if queued != 2 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + + // Allow the eviction interval to run + time.Sleep(2 * evictionInterval) + + // Transactions should not be evicted from the queue yet since lifetime duration has not passed + pending, queued = pool.Stats() + if pending != 0 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0) + } + if queued != 2 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + + // Wait a bit for eviction to run and clean up any leftovers, and ensure only the local remains + time.Sleep(2 * config.Lifetime) + + pending, queued = pool.Stats() + if pending != 0 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0) + } + if nolocals { + if queued != 0 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) + } + } else { + if queued != 1 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) + } + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + + // remove current transactions and increase nonce to prepare for a reset and cleanup + statedb.SetNonce(crypto.PubkeyToAddress(remote.PublicKey), 2) + statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 2) + <-pool.requestReset(nil, nil) + + // make sure queue, pending are cleared + pending, queued = pool.Stats() 
+ if pending != 0 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0) + } + if queued != 0 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + + // Queue gapped transactions + if err := pool.addLocal(pricedTransaction(4, 100000, big.NewInt(1), local)); err != nil { + t.Fatalf("failed to add remote transaction: %v", err) + } + if err := pool.addRemoteSync(pricedTransaction(4, 100000, big.NewInt(1), remote)); err != nil { + t.Fatalf("failed to add remote transaction: %v", err) + } + time.Sleep(5 * evictionInterval) // A half lifetime pass + + // Queue executable transactions, the life cycle should be restarted. + if err := pool.addLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil { + t.Fatalf("failed to add remote transaction: %v", err) + } + if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(1), remote)); err != nil { + t.Fatalf("failed to add remote transaction: %v", err) + } + time.Sleep(6 * evictionInterval) + + // All gapped transactions shouldn't be kicked out + pending, queued = pool.Stats() + if pending != 2 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) + } + if queued != 2 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + + // The whole life time pass after last promotion, kick out stale transactions + time.Sleep(2 * config.Lifetime) + pending, queued = pool.Stats() + if pending != 2 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) + } + if nolocals { + if queued != 0 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) + } + } else { + if queued != 1 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) + } + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } +} + +// Tests that even if the transaction count belonging to a single account goes +// above some threshold, as long as the transactions are executable, they are +// accepted. 
+func TestPendingLimitingNoAuctioneer(t *testing.T) { + t.Parallel() + + // Create a test account and fund it + pool, key := setupPool(false) + defer pool.Close() + + account := crypto.PubkeyToAddress(key.PublicKey) + testAddBalance(pool, account, big.NewInt(1000000000000)) + + // Keep track of transaction events to ensure all executables get announced + events := make(chan core.NewTxsEvent, testTxPoolConfig.AccountQueue+5) + sub := pool.txFeed.Subscribe(events) + defer sub.Unsubscribe() + + // Keep queuing up transactions and make sure all above a limit are dropped + for i := uint64(0); i < testTxPoolConfig.AccountQueue+5; i++ { + if err := pool.addRemoteSync(transaction(i, 100000, key)); err != nil { + t.Fatalf("tx %d: failed to add transaction: %v", i, err) + } + if pool.pending[account].Len() != int(i)+1 { + t.Errorf("tx %d: pending pool size mismatch: have %d, want %d", i, pool.pending[account].Len(), i+1) + } + if len(pool.queue) != 0 { + t.Errorf("tx %d: queue size mismatch: have %d, want %d", i, pool.queue[account].Len(), 0) + } + } + if pool.all.Count() != int(testTxPoolConfig.AccountQueue+5) { + t.Errorf("total transaction mismatch: have %d, want %d", pool.all.Count(), testTxPoolConfig.AccountQueue+5) + } + if err := validateEvents(events, int(testTxPoolConfig.AccountQueue+5)); err != nil { + t.Fatalf("event firing failed: %v", err) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } +} + +// Tests that if the transaction count belonging to multiple accounts go above +// some hard threshold, the higher transactions are dropped to prevent DOS +// attacks. +func TestPendingGlobalLimitingNoAuctioneer(t *testing.T) { + t.Parallel() + + // Create the pool to test the limit enforcement with + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + + config := testTxPoolConfig + config.GlobalSlots = config.AccountSlots * 10 + + pool := New(config, blockchain, false) + pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() + + // Create a number of test accounts and fund them + keys := make([]*ecdsa.PrivateKey, 5) + for i := 0; i < len(keys); i++ { + keys[i], _ = crypto.GenerateKey() + testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000)) + } + // Generate and queue a batch of transactions + nonces := make(map[common.Address]uint64) + + txs := types.Transactions{} + for _, key := range keys { + addr := crypto.PubkeyToAddress(key.PublicKey) + for j := 0; j < int(config.GlobalSlots)/len(keys)*2; j++ { + txs = append(txs, transaction(nonces[addr], 100000, key)) + nonces[addr]++ + } + } + // Import the batch and verify that limits have been enforced + pool.addRemotesSync(txs) + + pending := 0 + for _, list := range pool.pending { + pending += list.Len() + } + if pending > int(config.GlobalSlots) { + t.Fatalf("total pending transactions overflow allowance: %d > %d", pending, config.GlobalSlots) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } +} + +// Test the limit on transaction size is enforced correctly. +// This test verifies every transaction having allowed size +// is added to the pool, and longer transactions are rejected. 
+func TestAllowedTxSizeNoAuctioneer(t *testing.T) { + t.Parallel() + + // Create a test account and fund it + pool, key := setupPool(false) + defer pool.Close() + + account := crypto.PubkeyToAddress(key.PublicKey) + testAddBalance(pool, account, big.NewInt(1000000000)) + + // Compute maximal data size for transactions (lower bound). + // + // It is assumed the fields in the transaction (except of the data) are: + // - nonce <= 32 bytes + // - gasTip <= 32 bytes + // - gasLimit <= 32 bytes + // - recipient == 20 bytes + // - value <= 32 bytes + // - signature == 65 bytes + // All those fields are summed up to at most 213 bytes. + baseSize := uint64(213) + dataSize := txMaxSize - baseSize + // Try adding a transaction with maximal allowed size + tx := pricedDataTransaction(0, pool.currentHead.Load().GasLimit, big.NewInt(1), key, dataSize) + if err := pool.addRemoteSync(tx); err != nil { + t.Fatalf("failed to add transaction of size %d, close to maximal: %v", int(tx.Size()), err) + } + // Try adding a transaction with random allowed size + if err := pool.addRemoteSync(pricedDataTransaction(1, pool.currentHead.Load().GasLimit, big.NewInt(1), key, uint64(rand.Intn(int(dataSize))))); err != nil { + t.Fatalf("failed to add transaction of random allowed size: %v", err) + } + // Try adding a transaction of minimal not allowed size + if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentHead.Load().GasLimit, big.NewInt(1), key, txMaxSize)); err == nil { + t.Fatalf("expected rejection on slightly oversize transaction") + } + // Try adding a transaction of random not allowed size + if err := pool.addRemoteSync(pricedDataTransaction(2, pool.currentHead.Load().GasLimit, big.NewInt(1), key, dataSize+1+uint64(rand.Intn(10*txMaxSize)))); err == nil { + t.Fatalf("expected rejection on oversize transaction") + } + // Run some sanity checks on the pool internals + pending, queued := pool.Stats() + if pending != 2 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) + } + if queued != 0 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } +} + +// Tests that if transactions start being capped, transactions are also removed from 'all' +func TestCapClearsFromAllNoAuctioneer(t *testing.T) { + t.Parallel() + + // Create the pool to test the limit enforcement with + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + + config := testTxPoolConfig + config.AccountSlots = 2 + config.AccountQueue = 2 + config.GlobalSlots = 8 + + pool := New(config, blockchain, false) + pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() + + // Create a number of test accounts and fund them + key, _ := crypto.GenerateKey() + addr := crypto.PubkeyToAddress(key.PublicKey) + testAddBalance(pool, addr, big.NewInt(1000000)) + + txs := types.Transactions{} + for j := 0; j < int(config.GlobalSlots)*2; j++ { + txs = append(txs, transaction(uint64(j), 100000, key)) + } + // Import the batch and verify that limits have been enforced + pool.addRemotes(txs) + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } +} + +// Tests that if the transaction count belonging to multiple accounts go above +// some hard threshold, if they are under 
the minimum guaranteed slot count then +// the transactions are still kept. +func TestPendingMinimumAllowanceNoAuctioneer(t *testing.T) { + t.Parallel() + + // Create the pool to test the limit enforcement with + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + + config := testTxPoolConfig + config.GlobalSlots = 1 + + pool := New(config, blockchain, false) + pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() + + // Create a number of test accounts and fund them + keys := make([]*ecdsa.PrivateKey, 5) + for i := 0; i < len(keys); i++ { + keys[i], _ = crypto.GenerateKey() + testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000)) + } + // Generate and queue a batch of transactions + nonces := make(map[common.Address]uint64) + + txs := types.Transactions{} + for _, key := range keys { + addr := crypto.PubkeyToAddress(key.PublicKey) + for j := 0; j < int(config.AccountSlots)*2; j++ { + txs = append(txs, transaction(nonces[addr], 100000, key)) + nonces[addr]++ + } + } + // Import the batch and verify that limits have been enforced + pool.addRemotesSync(txs) + + for addr, list := range pool.pending { + if list.Len() != int(config.AccountSlots) { + t.Errorf("addr %x: total pending transactions mismatch: have %d, want %d", addr, list.Len(), config.AccountSlots) + } + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } +} + +// Tests that setting the transaction pool gas price to a higher value correctly +// discards everything cheaper than that and moves any gapped transactions back +// from the pending pool to the queue. +// +// Note, local transactions are never allowed to be dropped. 
+func TestRepricingNoAuctioneer(t *testing.T) { + t.Parallel() + + // Create the pool to test the pricing enforcement with + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + + pool := New(testTxPoolConfig, blockchain, false) + pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() + + // Keep track of transaction events to ensure all executables get announced + events := make(chan core.NewTxsEvent, 32) + sub := pool.txFeed.Subscribe(events) + defer sub.Unsubscribe() + + // Create a number of test accounts and fund them + keys := make([]*ecdsa.PrivateKey, 4) + for i := 0; i < len(keys); i++ { + keys[i], _ = crypto.GenerateKey() + testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000)) + } + // Generate and queue a batch of transactions, both pending and queued + txs := types.Transactions{} + + txs = append(txs, pricedTransaction(0, 100000, big.NewInt(2), keys[0])) + txs = append(txs, pricedTransaction(1, 100000, big.NewInt(1), keys[0])) + txs = append(txs, pricedTransaction(2, 100000, big.NewInt(2), keys[0])) + + txs = append(txs, pricedTransaction(0, 100000, big.NewInt(1), keys[1])) + txs = append(txs, pricedTransaction(1, 100000, big.NewInt(2), keys[1])) + txs = append(txs, pricedTransaction(2, 100000, big.NewInt(2), keys[1])) + + txs = append(txs, pricedTransaction(1, 100000, big.NewInt(2), keys[2])) + txs = append(txs, pricedTransaction(2, 100000, big.NewInt(1), keys[2])) + txs = append(txs, pricedTransaction(3, 100000, big.NewInt(2), keys[2])) + + ltx := pricedTransaction(0, 100000, big.NewInt(1), keys[3]) + + // Import the batch and that both pending and queued transactions match up + pool.addRemotesSync(txs) + pool.addLocal(ltx) + + pending, queued := pool.Stats() + if pending != 7 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 7) + } + if queued != 3 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 3) + } + if err := validateEvents(events, 7); err != nil { + t.Fatalf("original event firing failed: %v", err) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + // Reprice the pool and check that underpriced transactions get dropped + pool.SetGasTip(big.NewInt(2)) + + pending, queued = pool.Stats() + if pending != 2 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) + } + if queued != 5 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 5) + } + if err := validateEvents(events, 0); err != nil { + t.Fatalf("reprice event firing failed: %v", err) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + // Check that we can't add the old transactions back + if err := pool.addRemote(pricedTransaction(1, 100000, big.NewInt(1), keys[0])); !errors.Is(err, txpool.ErrUnderpriced) { + t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced) + } + if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); !errors.Is(err, txpool.ErrUnderpriced) { + t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced) + } + if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(1), keys[2])); !errors.Is(err, txpool.ErrUnderpriced) 
{ + t.Fatalf("adding underpriced queued transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced) + } + if err := validateEvents(events, 0); err != nil { + t.Fatalf("post-reprice event firing failed: %v", err) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + // However we can add local underpriced transactions + tx := pricedTransaction(1, 100000, big.NewInt(1), keys[3]) + if err := pool.addLocal(tx); err != nil { + t.Fatalf("failed to add underpriced local transaction: %v", err) + } + if pending, _ = pool.Stats(); pending != 3 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3) + } + if err := validateEvents(events, 1); err != nil { + t.Fatalf("post-reprice local event firing failed: %v", err) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + // And we can fill gaps with properly priced transactions + if err := pool.addRemote(pricedTransaction(1, 100000, big.NewInt(2), keys[0])); err != nil { + t.Fatalf("failed to add pending transaction: %v", err) + } + if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(2), keys[1])); err != nil { + t.Fatalf("failed to add pending transaction: %v", err) + } + if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(2), keys[2])); err != nil { + t.Fatalf("failed to add queued transaction: %v", err) + } + if err := validateEvents(events, 5); err != nil { + t.Fatalf("post-reprice event firing failed: %v", err) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } +} + +func TestMinGasPriceEnforcedNoAuctioneer(t *testing.T) { + t.Parallel() + + // Create the pool to test the pricing enforcement with + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(eip1559Config, 10000000, statedb, new(event.Feed)) + + txPoolConfig := DefaultConfig + txPoolConfig.NoLocals = true + pool := New(txPoolConfig, blockchain, false) + pool.Init(txPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() + + key, _ := crypto.GenerateKey() + testAddBalance(pool, crypto.PubkeyToAddress(key.PublicKey), big.NewInt(1000000)) + + tx := pricedTransaction(0, 100000, big.NewInt(2), key) + pool.SetGasTip(big.NewInt(tx.GasPrice().Int64() + 1)) + + if err := pool.addLocal(tx); !errors.Is(err, txpool.ErrUnderpriced) { + t.Fatalf("Min tip not enforced") + } + + if err := pool.Add([]*types.Transaction{tx}, true, false)[0]; !errors.Is(err, txpool.ErrUnderpriced) { + t.Fatalf("Min tip not enforced") + } + + tx = dynamicFeeTx(0, 100000, big.NewInt(3), big.NewInt(2), key) + pool.SetGasTip(big.NewInt(tx.GasTipCap().Int64() + 1)) + + if err := pool.addLocal(tx); !errors.Is(err, txpool.ErrUnderpriced) { + t.Fatalf("Min tip not enforced") + } + + if err := pool.Add([]*types.Transaction{tx}, true, false)[0]; !errors.Is(err, txpool.ErrUnderpriced) { + t.Fatalf("Min tip not enforced") + } + // Make sure the tx is accepted if locals are enabled + pool.config.NoLocals = false + if err := pool.Add([]*types.Transaction{tx}, true, false)[0]; err != nil { + t.Fatalf("Min tip enforced with locals enabled, error: %v", err) + } +} + +// Tests that setting the transaction pool gas price to a higher value correctly +// discards everything cheaper (legacy & dynamic fee) than that and moves any +// gapped transactions back from the pending pool to 
the queue. +// +// Note, local transactions are never allowed to be dropped. +func TestRepricingDynamicFeeNoAuctioneer(t *testing.T) { + t.Parallel() + + // Create the pool to test the pricing enforcement with + pool, _ := setupPoolWithConfig(eip1559Config, false) + defer pool.Close() + + // Keep track of transaction events to ensure all executables get announced + events := make(chan core.NewTxsEvent, 32) + sub := pool.txFeed.Subscribe(events) + defer sub.Unsubscribe() + + // Create a number of test accounts and fund them + keys := make([]*ecdsa.PrivateKey, 4) + for i := 0; i < len(keys); i++ { + keys[i], _ = crypto.GenerateKey() + testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000)) + } + // Generate and queue a batch of transactions, both pending and queued + txs := types.Transactions{} + + txs = append(txs, pricedTransaction(0, 100000, big.NewInt(2), keys[0])) + txs = append(txs, pricedTransaction(1, 100000, big.NewInt(1), keys[0])) + txs = append(txs, pricedTransaction(2, 100000, big.NewInt(2), keys[0])) + + txs = append(txs, dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[1])) + txs = append(txs, dynamicFeeTx(1, 100000, big.NewInt(3), big.NewInt(2), keys[1])) + txs = append(txs, dynamicFeeTx(2, 100000, big.NewInt(3), big.NewInt(2), keys[1])) + + txs = append(txs, dynamicFeeTx(1, 100000, big.NewInt(2), big.NewInt(2), keys[2])) + txs = append(txs, dynamicFeeTx(2, 100000, big.NewInt(1), big.NewInt(1), keys[2])) + txs = append(txs, dynamicFeeTx(3, 100000, big.NewInt(2), big.NewInt(2), keys[2])) + + ltx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[3]) + + // Import the batch and that both pending and queued transactions match up + pool.addRemotesSync(txs) + pool.addLocal(ltx) + + pending, queued := pool.Stats() + if pending != 7 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 7) + } + if queued != 3 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 3) + } + if err := validateEvents(events, 7); err != nil { + t.Fatalf("original event firing failed: %v", err) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + // Reprice the pool and check that underpriced transactions get dropped + pool.SetGasTip(big.NewInt(2)) + + pending, queued = pool.Stats() + if pending != 2 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) + } + if queued != 5 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 5) + } + if err := validateEvents(events, 0); err != nil { + t.Fatalf("reprice event firing failed: %v", err) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + // Check that we can't add the old transactions back + tx := pricedTransaction(1, 100000, big.NewInt(1), keys[0]) + if err := pool.addRemote(tx); !errors.Is(err, txpool.ErrUnderpriced) { + t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced) + } + tx = dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[1]) + if err := pool.addRemote(tx); !errors.Is(err, txpool.ErrUnderpriced) { + t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced) + } + tx = dynamicFeeTx(2, 100000, big.NewInt(1), big.NewInt(1), keys[2]) + if err := pool.addRemote(tx); !errors.Is(err, txpool.ErrUnderpriced) { + t.Fatalf("adding underpriced queued transaction error mismatch: 
have %v, want %v", err, txpool.ErrUnderpriced) + } + if err := validateEvents(events, 0); err != nil { + t.Fatalf("post-reprice event firing failed: %v", err) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + // However we can add local underpriced transactions + tx = dynamicFeeTx(1, 100000, big.NewInt(1), big.NewInt(1), keys[3]) + if err := pool.addLocal(tx); err != nil { + t.Fatalf("failed to add underpriced local transaction: %v", err) + } + if pending, _ = pool.Stats(); pending != 3 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3) + } + if err := validateEvents(events, 1); err != nil { + t.Fatalf("post-reprice local event firing failed: %v", err) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + // And we can fill gaps with properly priced transactions + tx = pricedTransaction(1, 100000, big.NewInt(2), keys[0]) + if err := pool.addRemote(tx); err != nil { + t.Fatalf("failed to add pending transaction: %v", err) + } + tx = dynamicFeeTx(0, 100000, big.NewInt(3), big.NewInt(2), keys[1]) + if err := pool.addRemote(tx); err != nil { + t.Fatalf("failed to add pending transaction: %v", err) + } + tx = dynamicFeeTx(2, 100000, big.NewInt(2), big.NewInt(2), keys[2]) + if err := pool.addRemoteSync(tx); err != nil { + t.Fatalf("failed to add queued transaction: %v", err) + } + if err := validateEvents(events, 5); err != nil { + t.Fatalf("post-reprice event firing failed: %v", err) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } +} + +// Tests that setting the transaction pool gas price to a higher value does not +// remove local transactions (legacy & dynamic fee). +func TestRepricingKeepsLocalsNoAuctioneer(t *testing.T) { + t.Parallel() + + // Create the pool to test the pricing enforcement with + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(eip1559Config, 1000000, statedb, new(event.Feed)) + + pool := New(testTxPoolConfig, blockchain, false) + pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() + + // Create a number of test accounts and fund them + keys := make([]*ecdsa.PrivateKey, 3) + for i := 0; i < len(keys); i++ { + keys[i], _ = crypto.GenerateKey() + testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(100000*1000000)) + } + // Create transaction (both pending and queued) with a linearly growing gasprice + for i := uint64(0); i < 500; i++ { + // Add pending transaction. + pendingTx := pricedTransaction(i, 100000, big.NewInt(int64(i)), keys[2]) + if err := pool.addLocal(pendingTx); err != nil { + t.Fatal(err) + } + // Add queued transaction. + queuedTx := pricedTransaction(i+501, 100000, big.NewInt(int64(i)), keys[2]) + if err := pool.addLocal(queuedTx); err != nil { + t.Fatal(err) + } + + // Add pending dynamic fee transaction. + pendingTx = dynamicFeeTx(i, 100000, big.NewInt(int64(i)+1), big.NewInt(int64(i)), keys[1]) + if err := pool.addLocal(pendingTx); err != nil { + t.Fatal(err) + } + // Add queued dynamic fee transaction. 
+ queuedTx = dynamicFeeTx(i+501, 100000, big.NewInt(int64(i)+1), big.NewInt(int64(i)), keys[1]) + if err := pool.addLocal(queuedTx); err != nil { + t.Fatal(err) + } + } + pending, queued := pool.Stats() + expPending, expQueued := 1000, 1000 + validate := func() { + pending, queued = pool.Stats() + if pending != expPending { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, expPending) + } + if queued != expQueued { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, expQueued) + } + + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + } + validate() + + // Reprice the pool and check that nothing is dropped + pool.SetGasTip(big.NewInt(2)) + validate() + + pool.SetGasTip(big.NewInt(2)) + pool.SetGasTip(big.NewInt(4)) + pool.SetGasTip(big.NewInt(8)) + pool.SetGasTip(big.NewInt(100)) + validate() +} + +// Tests that when the pool reaches its global transaction limit, underpriced +// transactions are gradually shifted out for more expensive ones and any gapped +// pending transactions are moved into the queue. +// +// Note, local transactions are never allowed to be dropped. +func TestUnderpricingNoAuctioneer(t *testing.T) { + t.Parallel() + + // Create the pool to test the pricing enforcement with + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + + config := testTxPoolConfig + config.GlobalSlots = 2 + config.GlobalQueue = 2 + + pool := New(config, blockchain, false) + pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() + + // Keep track of transaction events to ensure all executables get announced + events := make(chan core.NewTxsEvent, 32) + sub := pool.txFeed.Subscribe(events) + defer sub.Unsubscribe() + + // Create a number of test accounts and fund them + keys := make([]*ecdsa.PrivateKey, 5) + for i := 0; i < len(keys); i++ { + keys[i], _ = crypto.GenerateKey() + testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000)) + } + // Generate and queue a batch of transactions, both pending and queued + txs := types.Transactions{} + + txs = append(txs, pricedTransaction(0, 100000, big.NewInt(1), keys[0])) + txs = append(txs, pricedTransaction(1, 100000, big.NewInt(2), keys[0])) + + txs = append(txs, pricedTransaction(1, 100000, big.NewInt(1), keys[1])) + + ltx := pricedTransaction(0, 100000, big.NewInt(1), keys[2]) + + // Import the batch and that both pending and queued transactions match up + pool.addRemotes(txs) + pool.addLocal(ltx) + + pending, queued := pool.Stats() + if pending != 3 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3) + } + if queued != 1 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) + } + if err := validateEvents(events, 3); err != nil { + t.Fatalf("original event firing failed: %v", err) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + // Ensure that adding an underpriced transaction on block limit fails + if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); !errors.Is(err, txpool.ErrUnderpriced) { + t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced) + } + // Replace a future transaction with a future transaction + if err := 
pool.addRemoteSync(pricedTransaction(1, 100000, big.NewInt(2), keys[1])); err != nil { // +K1:1 => -K1:1 => Pend K0:0, K0:1, K2:0; Que K1:1 + t.Fatalf("failed to add well priced transaction: %v", err) + } + // Ensure that adding high priced transactions drops cheap ones, but not own + if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(3), keys[1])); err != nil { // +K1:0 => -K1:1 => Pend K0:0, K0:1, K1:0, K2:0; Que - + t.Fatalf("failed to add well priced transaction: %v", err) + } + if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(4), keys[1])); err != nil { // +K1:2 => -K0:0 => Pend K1:0, K2:0; Que K0:1 K1:2 + t.Fatalf("failed to add well priced transaction: %v", err) + } + if err := pool.addRemote(pricedTransaction(3, 100000, big.NewInt(5), keys[1])); err != nil { // +K1:3 => -K0:1 => Pend K1:0, K2:0; Que K1:2 K1:3 + t.Fatalf("failed to add well priced transaction: %v", err) + } + // Ensure that replacing a pending transaction with a future transaction fails + if err := pool.addRemote(pricedTransaction(5, 100000, big.NewInt(6), keys[1])); err != txpool.ErrFutureReplacePending { + t.Fatalf("adding future replace transaction error mismatch: have %v, want %v", err, txpool.ErrFutureReplacePending) + } + pending, queued = pool.Stats() + if pending != 2 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) + } + if queued != 2 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) + } + if err := validateEvents(events, 2); err != nil { + t.Fatalf("additional event firing failed: %v", err) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + // Ensure that adding local transactions can push out even higher priced ones + ltx = pricedTransaction(1, 100000, big.NewInt(0), keys[2]) + if err := pool.addLocal(ltx); err != nil { + t.Fatalf("failed to append underpriced local transaction: %v", err) + } + ltx = pricedTransaction(0, 100000, big.NewInt(0), keys[3]) + if err := pool.addLocal(ltx); err != nil { + t.Fatalf("failed to add new underpriced local transaction: %v", err) + } + pending, queued = pool.Stats() + if pending != 3 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3) + } + if queued != 1 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) + } + if err := validateEvents(events, 2); err != nil { + t.Fatalf("local event firing failed: %v", err) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } +} + +// Tests that more expensive transactions push out cheap ones from the pool, but +// without producing instability by creating gaps that start jumping transactions +// back and forth between queued/pending. 
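+//
+// With GlobalQueue set to zero there is nowhere to park demoted transactions, so
+// once the pricier transaction is admitted the pending count must still equal
+// GlobalSlots and the queue must remain empty.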
+func TestStableUnderpricingNoAuctioneer(t *testing.T) { + t.Parallel() + + // Create the pool to test the pricing enforcement with + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + + config := testTxPoolConfig + config.GlobalSlots = 128 + config.GlobalQueue = 0 + + pool := New(config, blockchain, false) + pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() + + // Keep track of transaction events to ensure all executables get announced + events := make(chan core.NewTxsEvent, 32) + sub := pool.txFeed.Subscribe(events) + defer sub.Unsubscribe() + + // Create a number of test accounts and fund them + keys := make([]*ecdsa.PrivateKey, 2) + for i := 0; i < len(keys); i++ { + keys[i], _ = crypto.GenerateKey() + testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000)) + } + // Fill up the entire queue with the same transaction price points + txs := types.Transactions{} + for i := uint64(0); i < config.GlobalSlots; i++ { + txs = append(txs, pricedTransaction(i, 100000, big.NewInt(1), keys[0])) + } + pool.addRemotesSync(txs) + + pending, queued := pool.Stats() + if pending != int(config.GlobalSlots) { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, config.GlobalSlots) + } + if queued != 0 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) + } + if err := validateEvents(events, int(config.GlobalSlots)); err != nil { + t.Fatalf("original event firing failed: %v", err) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + // Ensure that adding high priced transactions drops a cheap, but doesn't produce a gap + if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(3), keys[1])); err != nil { + t.Fatalf("failed to add well priced transaction: %v", err) + } + pending, queued = pool.Stats() + if pending != int(config.GlobalSlots) { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, config.GlobalSlots) + } + if queued != 0 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) + } + if err := validateEvents(events, 1); err != nil { + t.Fatalf("additional event firing failed: %v", err) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } +} + +// Tests that when the pool reaches its global transaction limit, underpriced +// transactions (legacy & dynamic fee) are gradually shifted out for more +// expensive ones and any gapped pending transactions are moved into the queue. +// +// Note, local transactions are never allowed to be dropped. 
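+//
+// The inline annotations below (e.g. "Pend K0:0, K0:1, K2:0; Que K1:1") record which
+// key/nonce pairs are expected to be pending and queued after each step.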
+func TestUnderpricingDynamicFeeNoAuctioneer(t *testing.T) { + t.Parallel() + + pool, _ := setupPoolWithConfig(eip1559Config, false) + defer pool.Close() + + pool.config.GlobalSlots = 2 + pool.config.GlobalQueue = 2 + + // Keep track of transaction events to ensure all executables get announced + events := make(chan core.NewTxsEvent, 32) + sub := pool.txFeed.Subscribe(events) + defer sub.Unsubscribe() + + // Create a number of test accounts and fund them + keys := make([]*ecdsa.PrivateKey, 4) + for i := 0; i < len(keys); i++ { + keys[i], _ = crypto.GenerateKey() + testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000)) + } + + // Generate and queue a batch of transactions, both pending and queued + txs := types.Transactions{} + + txs = append(txs, dynamicFeeTx(0, 100000, big.NewInt(3), big.NewInt(2), keys[0])) + txs = append(txs, pricedTransaction(1, 100000, big.NewInt(2), keys[0])) + txs = append(txs, dynamicFeeTx(1, 100000, big.NewInt(2), big.NewInt(1), keys[1])) + + ltx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[2]) + + // Import the batch and that both pending and queued transactions match up + pool.addRemotes(txs) // Pend K0:0, K0:1; Que K1:1 + pool.addLocal(ltx) // +K2:0 => Pend K0:0, K0:1, K2:0; Que K1:1 + + pending, queued := pool.Stats() + if pending != 3 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3) + } + if queued != 1 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) + } + if err := validateEvents(events, 3); err != nil { + t.Fatalf("original event firing failed: %v", err) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + + // Ensure that adding an underpriced transaction fails + tx := dynamicFeeTx(0, 100000, big.NewInt(2), big.NewInt(1), keys[1]) + if err := pool.addRemote(tx); !errors.Is(err, txpool.ErrUnderpriced) { // Pend K0:0, K0:1, K2:0; Que K1:1 + t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced) + } + + // Ensure that adding high priced transactions drops cheap ones, but not own + tx = pricedTransaction(0, 100000, big.NewInt(2), keys[1]) + if err := pool.addRemote(tx); err != nil { // +K1:0, -K1:1 => Pend K0:0, K0:1, K1:0, K2:0; Que - + t.Fatalf("failed to add well priced transaction: %v", err) + } + + tx = pricedTransaction(1, 100000, big.NewInt(3), keys[1]) + if err := pool.addRemoteSync(tx); err != nil { // +K1:2, -K0:1 => Pend K0:0 K1:0, K2:0; Que K1:2 + t.Fatalf("failed to add well priced transaction: %v", err) + } + tx = dynamicFeeTx(2, 100000, big.NewInt(4), big.NewInt(1), keys[1]) + if err := pool.addRemoteSync(tx); err != nil { // +K1:3, -K1:0 => Pend K0:0 K2:0; Que K1:2 K1:3 + t.Fatalf("failed to add well priced transaction: %v", err) + } + pending, queued = pool.Stats() + if pending != 2 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) + } + if queued != 2 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) + } + if err := validateEvents(events, 2); err != nil { + t.Fatalf("additional event firing failed: %v", err) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + // Ensure that adding local transactions can push out even higher priced ones + ltx = dynamicFeeTx(1, 100000, big.NewInt(0), big.NewInt(0), keys[2]) + if err := pool.addLocal(ltx); err != nil { + t.Fatalf("failed to append underpriced local 
transaction: %v", err)
+	}
+	ltx = dynamicFeeTx(0, 100000, big.NewInt(0), big.NewInt(0), keys[3])
+	if err := pool.addLocal(ltx); err != nil {
+		t.Fatalf("failed to add new underpriced local transaction: %v", err)
+	}
+	pending, queued = pool.Stats()
+	if pending != 3 {
+		t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 3)
+	}
+	if queued != 1 {
+		t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1)
+	}
+	if err := validateEvents(events, 2); err != nil {
+		t.Fatalf("local event firing failed: %v", err)
+	}
+	if err := validatePoolInternals(pool); err != nil {
+		t.Fatalf("pool internal state corrupted: %v", err)
+	}
+}
+
+// Tests whether the highest fee cap transaction is retained after a batch of high effective
+// tip transactions are added and vice versa
+func TestDualHeapEvictionNoAuctioneer(t *testing.T) {
+	t.Parallel()
+
+	pool, _ := setupPoolWithConfig(eip1559Config, false)
+	defer pool.Close()
+
+	pool.config.GlobalSlots = 10
+	pool.config.GlobalQueue = 10
+
+	var (
+		highTip, highCap *types.Transaction
+		baseFee          int
+	)
+
+	check := func(tx *types.Transaction, name string) {
+		if pool.all.GetRemote(tx.Hash()) == nil {
+			t.Fatalf("highest %s transaction evicted from the pool", name)
+		}
+	}
+
+	add := func(urgent bool) {
+		for i := 0; i < 20; i++ {
+			var tx *types.Transaction
+			// Create a test account and fund it
+			key, _ := crypto.GenerateKey()
+			testAddBalance(pool, crypto.PubkeyToAddress(key.PublicKey), big.NewInt(1000000000000))
+			if urgent {
+				tx = dynamicFeeTx(0, 100000, big.NewInt(int64(baseFee+1+i)), big.NewInt(int64(1+i)), key)
+				highTip = tx
+			} else {
+				tx = dynamicFeeTx(0, 100000, big.NewInt(int64(baseFee+200+i)), big.NewInt(1), key)
+				highCap = tx
+			}
+			pool.addRemotesSync([]*types.Transaction{tx})
+		}
+		pending, queued := pool.Stats()
+		if pending+queued != 20 {
+			t.Fatalf("transaction count mismatch: have %d, want %d", pending+queued, 20)
+		}
+	}
+
+	add(false)
+	for baseFee = 0; baseFee <= 1000; baseFee += 100 {
+		pool.priced.SetBaseFee(big.NewInt(int64(baseFee)))
+		add(true)
+		check(highCap, "fee cap")
+		add(false)
+		check(highTip, "effective tip")
+	}
+
+	if err := validatePoolInternals(pool); err != nil {
+		t.Fatalf("pool internal state corrupted: %v", err)
+	}
+}
+
+// Tests that the pool rejects duplicate transactions.
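+// Only every other transaction of the batch is inserted up front, so the second,
+// full submission must report the already pooled hashes as known while still
+// accepting the fresh ones.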
+func TestDeduplicationNoAuctioneer(t *testing.T) { + t.Parallel() + + // Create the pool to test the pricing enforcement with + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + + pool := New(testTxPoolConfig, blockchain, false) + pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() + + // Create a test account to add transactions with + key, _ := crypto.GenerateKey() + testAddBalance(pool, crypto.PubkeyToAddress(key.PublicKey), big.NewInt(1000000000)) + + // Create a batch of transactions and add a few of them + txs := make([]*types.Transaction, 16) + for i := 0; i < len(txs); i++ { + txs[i] = pricedTransaction(uint64(i), 100000, big.NewInt(1), key) + } + var firsts []*types.Transaction + for i := 0; i < len(txs); i += 2 { + firsts = append(firsts, txs[i]) + } + errs := pool.addRemotesSync(firsts) + if len(errs) != len(firsts) { + t.Fatalf("first add mismatching result count: have %d, want %d", len(errs), len(firsts)) + } + for i, err := range errs { + if err != nil { + t.Errorf("add %d failed: %v", i, err) + } + } + pending, queued := pool.Stats() + if pending != 1 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 1) + } + if queued != len(txs)/2-1 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, len(txs)/2-1) + } + // Try to add all of them now and ensure previous ones error out as knowns + errs = pool.addRemotesSync(txs) + if len(errs) != len(txs) { + t.Fatalf("all add mismatching result count: have %d, want %d", len(errs), len(txs)) + } + for i, err := range errs { + if i%2 == 0 && err == nil { + t.Errorf("add %d succeeded, should have failed as known", i) + } + if i%2 == 1 && err != nil { + t.Errorf("add %d failed: %v", i, err) + } + } + pending, queued = pool.Stats() + if pending != len(txs) { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, len(txs)) + } + if queued != 0 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } +} + +// Tests that the pool rejects replacement transactions that don't meet the minimum +// price bump required. 
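+//
+// With the PriceBump of 10% used by the test configuration, replacing a pending
+// transaction priced at 100 wei requires a gas price of at least
+// 100*(100+10)/100 = 110 wei, which is exactly the threshold computed below.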
+func TestReplacementNoAuctioneer(t *testing.T) { + t.Parallel() + + // Create the pool to test the pricing enforcement with + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + + pool := New(testTxPoolConfig, blockchain, false) + pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() + + // Keep track of transaction events to ensure all executables get announced + events := make(chan core.NewTxsEvent, 32) + sub := pool.txFeed.Subscribe(events) + defer sub.Unsubscribe() + + // Create a test account to add transactions with + key, _ := crypto.GenerateKey() + testAddBalance(pool, crypto.PubkeyToAddress(key.PublicKey), big.NewInt(1000000000)) + + // Add pending transactions, ensuring the minimum price bump is enforced for replacement (for ultra low prices too) + price := int64(100) + threshold := (price * (100 + int64(testTxPoolConfig.PriceBump))) / 100 + + if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), key)); err != nil { + t.Fatalf("failed to add original cheap pending transaction: %v", err) + } + if err := pool.addRemote(pricedTransaction(0, 100001, big.NewInt(1), key)); err != txpool.ErrReplaceUnderpriced { + t.Fatalf("original cheap pending transaction replacement error mismatch: have %v, want %v", err, txpool.ErrReplaceUnderpriced) + } + if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(2), key)); err != nil { + t.Fatalf("failed to replace original cheap pending transaction: %v", err) + } + if err := validateEvents(events, 2); err != nil { + t.Fatalf("cheap replacement event firing failed: %v", err) + } + + if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(price), key)); err != nil { + t.Fatalf("failed to add original proper pending transaction: %v", err) + } + if err := pool.addRemote(pricedTransaction(0, 100001, big.NewInt(threshold-1), key)); err != txpool.ErrReplaceUnderpriced { + t.Fatalf("original proper pending transaction replacement error mismatch: have %v, want %v", err, txpool.ErrReplaceUnderpriced) + } + if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(threshold), key)); err != nil { + t.Fatalf("failed to replace original proper pending transaction: %v", err) + } + if err := validateEvents(events, 2); err != nil { + t.Fatalf("proper replacement event firing failed: %v", err) + } + + // Add queued transactions, ensuring the minimum price bump is enforced for replacement (for ultra low prices too) + if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(1), key)); err != nil { + t.Fatalf("failed to add original cheap queued transaction: %v", err) + } + if err := pool.addRemote(pricedTransaction(2, 100001, big.NewInt(1), key)); err != txpool.ErrReplaceUnderpriced { + t.Fatalf("original cheap queued transaction replacement error mismatch: have %v, want %v", err, txpool.ErrReplaceUnderpriced) + } + if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(2), key)); err != nil { + t.Fatalf("failed to replace original cheap queued transaction: %v", err) + } + + if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(price), key)); err != nil { + t.Fatalf("failed to add original proper queued transaction: %v", err) + } + if err := pool.addRemote(pricedTransaction(2, 100001, big.NewInt(threshold-1), key)); err != txpool.ErrReplaceUnderpriced { + t.Fatalf("original proper queued transaction 
replacement error mismatch: have %v, want %v", err, txpool.ErrReplaceUnderpriced) + } + if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(threshold), key)); err != nil { + t.Fatalf("failed to replace original proper queued transaction: %v", err) + } + + if err := validateEvents(events, 0); err != nil { + t.Fatalf("queued replacement event firing failed: %v", err) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } +} + +// Tests that the pool rejects replacement dynamic fee transactions that don't +// meet the minimum price bump required. +func TestReplacementDynamicFeeNoAuctioneer(t *testing.T) { + t.Parallel() + + // Create the pool to test the pricing enforcement with + pool, key := setupPoolWithConfig(eip1559Config, false) + defer pool.Close() + testAddBalance(pool, crypto.PubkeyToAddress(key.PublicKey), big.NewInt(1000000000)) + + // Keep track of transaction events to ensure all executables get announced + events := make(chan core.NewTxsEvent, 32) + sub := pool.txFeed.Subscribe(events) + defer sub.Unsubscribe() + + // Add pending transactions, ensuring the minimum price bump is enforced for replacement (for ultra low prices too) + gasFeeCap := int64(100) + feeCapThreshold := (gasFeeCap * (100 + int64(testTxPoolConfig.PriceBump))) / 100 + gasTipCap := int64(60) + tipThreshold := (gasTipCap * (100 + int64(testTxPoolConfig.PriceBump))) / 100 + + // Run the following identical checks for both the pending and queue pools: + // 1. Send initial tx => accept + // 2. Don't bump tip or fee cap => discard + // 3. Bump both more than min => accept + // 4. Check events match expected (2 new executable txs during pending, 0 during queue) + // 5. Send new tx with larger tip and gasFeeCap => accept + // 6. Bump tip max allowed so it's still underpriced => discard + // 7. Bump fee cap max allowed so it's still underpriced => discard + // 8. Bump tip min for acceptance => discard + // 9. Bump feecap min for acceptance => discard + // 10. Bump feecap and tip min for acceptance => accept + // 11. Check events match expected (2 new executable txs during pending, 0 during queue) + stages := []string{"pending", "queued"} + for _, stage := range stages { + // Since state is empty, 0 nonce txs are "executable" and can go + // into pending immediately. 2 nonce txs are "gapped" + nonce := uint64(0) + if stage == "queued" { + nonce = 2 + } + + // 1. Send initial tx => accept + tx := dynamicFeeTx(nonce, 100000, big.NewInt(2), big.NewInt(1), key) + if err := pool.addRemoteSync(tx); err != nil { + t.Fatalf("failed to add original cheap %s transaction: %v", stage, err) + } + // 2. Don't bump tip or feecap => discard + tx = dynamicFeeTx(nonce, 100001, big.NewInt(2), big.NewInt(1), key) + if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced { + t.Fatalf("original cheap %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced) + } + // 3. Bump both more than min => accept + tx = dynamicFeeTx(nonce, 100000, big.NewInt(3), big.NewInt(2), key) + if err := pool.addRemote(tx); err != nil { + t.Fatalf("failed to replace original cheap %s transaction: %v", stage, err) + } + // 4. Check events match expected (2 new executable txs during pending, 0 during queue) + count := 2 + if stage == "queued" { + count = 0 + } + if err := validateEvents(events, count); err != nil { + t.Fatalf("cheap %s replacement event firing failed: %v", stage, err) + } + // 5. 
Send new tx with larger tip and feeCap => accept
+		tx = dynamicFeeTx(nonce, 100000, big.NewInt(gasFeeCap), big.NewInt(gasTipCap), key)
+		if err := pool.addRemoteSync(tx); err != nil {
+			t.Fatalf("failed to add original proper %s transaction: %v", stage, err)
+		}
+		// 6. Bump tip max allowed so it's still underpriced => discard
+		tx = dynamicFeeTx(nonce, 100000, big.NewInt(gasFeeCap), big.NewInt(tipThreshold-1), key)
+		if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced {
+			t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced)
+		}
+		// 7. Bump fee cap max allowed so it's still underpriced => discard
+		tx = dynamicFeeTx(nonce, 100000, big.NewInt(feeCapThreshold-1), big.NewInt(gasTipCap), key)
+		if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced {
+			t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced)
+		}
+		// 8. Bump tip min for acceptance => discard (fee cap still unbumped)
+		tx = dynamicFeeTx(nonce, 100000, big.NewInt(gasFeeCap), big.NewInt(tipThreshold), key)
+		if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced {
+			t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced)
+		}
+		// 9. Bump fee cap min for acceptance => discard (tip still unbumped)
+		tx = dynamicFeeTx(nonce, 100000, big.NewInt(feeCapThreshold), big.NewInt(gasTipCap), key)
+		if err := pool.addRemote(tx); err != txpool.ErrReplaceUnderpriced {
+			t.Fatalf("original proper %s transaction replacement error mismatch: have %v, want %v", stage, err, txpool.ErrReplaceUnderpriced)
+		}
+		// 10. Bump fee cap and tip min for acceptance => accept
+		tx = dynamicFeeTx(nonce, 100000, big.NewInt(feeCapThreshold), big.NewInt(tipThreshold), key)
+		if err := pool.addRemote(tx); err != nil {
+			t.Fatalf("failed to replace original proper %s transaction: %v", stage, err)
+		}
+		// 11. Check events match expected (2 new executable txs during pending, 0 during queue)
+		count = 2
+		if stage == "queued" {
+			count = 0
+		}
+		if err := validateEvents(events, count); err != nil {
+			t.Fatalf("replacement %s event firing failed: %v", stage, err)
+		}
+	}
+
+	if err := validatePoolInternals(pool); err != nil {
+		t.Fatalf("pool internal state corrupted: %v", err)
+	}
+}
+
+// Tests that local transactions are journaled to disk, but remote transactions
+// get discarded between restarts.
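+//
+// The journal file is rewritten on every Rejournal interval, which the test relies
+// on by sleeping for two intervals after invalidating a local transaction so the
+// rotated journal no longer contains it.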
+func TestJournalingNoAuctioneer(t *testing.T) { testJournalingNoAuctioneer(t, false) } +func TestJournalingNoLocalsNoAuctioneer(t *testing.T) { testJournalingNoAuctioneer(t, true) } + +func testJournalingNoAuctioneer(t *testing.T, nolocals bool) { + t.Parallel() + + // Create a temporary file for the journal + file, err := os.CreateTemp("", "") + if err != nil { + t.Fatalf("failed to create temporary journal: %v", err) + } + journal := file.Name() + defer os.Remove(journal) + + // Clean up the temporary file, we only need the path for now + file.Close() + os.Remove(journal) + + // Create the original pool to inject transaction into the journal + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + + config := testTxPoolConfig + config.NoLocals = nolocals + config.Journal = journal + config.Rejournal = time.Second + + pool := New(config, blockchain, false) + pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) + + // Create two test accounts to ensure remotes expire but locals do not + local, _ := crypto.GenerateKey() + remote, _ := crypto.GenerateKey() + + testAddBalance(pool, crypto.PubkeyToAddress(local.PublicKey), big.NewInt(1000000000)) + testAddBalance(pool, crypto.PubkeyToAddress(remote.PublicKey), big.NewInt(1000000000)) + + // Add three local and a remote transactions and ensure they are queued up + if err := pool.addLocal(pricedTransaction(0, 100000, big.NewInt(1), local)); err != nil { + t.Fatalf("failed to add local transaction: %v", err) + } + if err := pool.addLocal(pricedTransaction(1, 100000, big.NewInt(1), local)); err != nil { + t.Fatalf("failed to add local transaction: %v", err) + } + if err := pool.addLocal(pricedTransaction(2, 100000, big.NewInt(1), local)); err != nil { + t.Fatalf("failed to add local transaction: %v", err) + } + if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), remote)); err != nil { + t.Fatalf("failed to add remote transaction: %v", err) + } + pending, queued := pool.Stats() + if pending != 4 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 4) + } + if queued != 0 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + // Terminate the old pool, bump the local nonce, create a new pool and ensure relevant transaction survive + pool.Close() + statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1) + blockchain = newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + + pool = New(config, blockchain, false) + pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) + + pending, queued = pool.Stats() + if queued != 0 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) + } + if nolocals { + if pending != 0 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0) + } + } else { + if pending != 2 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) + } + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + // Bump the nonce temporarily and ensure the newly invalidated transaction is removed + statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 2) + <-pool.requestReset(nil, nil) + time.Sleep(2 * config.Rejournal) + pool.Close() 
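+
+	// Restart once more with the original nonce: only the nonce-2 local should have
+	// survived the rewritten journal and, with nonce 1 now missing, it is expected
+	// to load back as a queued (gapped) transaction rather than a pending one.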
+ + statedb.SetNonce(crypto.PubkeyToAddress(local.PublicKey), 1) + blockchain = newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + pool = New(config, blockchain, false) + pool.Init(config.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) + + pending, queued = pool.Stats() + if pending != 0 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 0) + } + if nolocals { + if queued != 0 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 0) + } + } else { + if queued != 1 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 1) + } + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + pool.Close() +} + +// TestStatusCheck tests that the pool can correctly retrieve the +// pending status of individual transactions. +func TestStatusCheckNoAuctioneer(t *testing.T) { + t.Parallel() + + // Create the pool to test the status retrievals with + statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) + blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) + + pool := New(testTxPoolConfig, blockchain, false) + pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) + defer pool.Close() + + // Create the test accounts to check various transaction statuses with + keys := make([]*ecdsa.PrivateKey, 3) + for i := 0; i < len(keys); i++ { + keys[i], _ = crypto.GenerateKey() + testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000)) + } + // Generate and queue a batch of transactions, both pending and queued + txs := types.Transactions{} + + txs = append(txs, pricedTransaction(0, 100000, big.NewInt(1), keys[0])) // Pending only + txs = append(txs, pricedTransaction(0, 100000, big.NewInt(1), keys[1])) // Pending and queued + txs = append(txs, pricedTransaction(2, 100000, big.NewInt(1), keys[1])) + txs = append(txs, pricedTransaction(2, 100000, big.NewInt(1), keys[2])) // Queued only + + // Import the transaction and ensure they are correctly added + pool.addRemotesSync(txs) + + pending, queued := pool.Stats() + if pending != 2 { + t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2) + } + if queued != 2 { + t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2) + } + if err := validatePoolInternals(pool); err != nil { + t.Fatalf("pool internal state corrupted: %v", err) + } + // Retrieve the status of each transaction and validate them + hashes := make([]common.Hash, len(txs)) + for i, tx := range txs { + hashes[i] = tx.Hash() + } + hashes = append(hashes, common.Hash{}) + expect := []txpool.TxStatus{txpool.TxStatusPending, txpool.TxStatusPending, txpool.TxStatusQueued, txpool.TxStatusQueued, txpool.TxStatusUnknown} + + for i := 0; i < len(hashes); i++ { + if status := pool.Status(hashes[i]); status != expect[i] { + t.Errorf("transaction %d: status mismatch: have %v, want %v", i, status, expect[i]) + } + } +} + +// Test the transaction slots consumption is computed correctly +func TestSlotCountNoAuctioneer(t *testing.T) { + t.Parallel() + + key, _ := crypto.GenerateKey() + + // Check that an empty transaction consumes a single slot + smallTx := pricedDataTransaction(0, 0, big.NewInt(0), key, 0) + if slots := numSlots(smallTx); slots != 1 { + t.Fatalf("small transactions slot count mismatch: have %d want %d", slots, 1) + } + // Check that a large transaction consumes the correct 
number of slots + bigTx := pricedDataTransaction(0, 0, big.NewInt(0), key, uint64(10*txSlotSize)) + if slots := numSlots(bigTx); slots != 11 { + t.Fatalf("big transactions slot count mismatch: have %d want %d", slots, 11) + } +} + +// Benchmarks the speed of validating the contents of the pending queue of the +// transaction pool. +func BenchmarkPendingDemotion100NoAuctioneer(b *testing.B) { + benchmarkPendingDemotionNoAuctioneer(b, 100) +} +func BenchmarkPendingDemotion1000NoAuctioneer(b *testing.B) { + benchmarkPendingDemotionNoAuctioneer(b, 1000) +} +func BenchmarkPendingDemotion10000NoAuctioneer(b *testing.B) { + benchmarkPendingDemotionNoAuctioneer(b, 10000) +} + +func benchmarkPendingDemotionNoAuctioneer(b *testing.B, size int) { + // Add a batch of transactions to a pool one by one + pool, key := setupPool(false) + defer pool.Close() + + account := crypto.PubkeyToAddress(key.PublicKey) + testAddBalance(pool, account, big.NewInt(1000000)) + + for i := 0; i < size; i++ { + tx := transaction(uint64(i), 100000, key) + pool.promoteTx(account, tx.Hash(), tx) + } + // Benchmark the speed of pool validation + b.ResetTimer() + for i := 0; i < b.N; i++ { + pool.demoteUnexecutables() + } +} + +// Benchmarks the speed of scheduling the contents of the future queue of the +// transaction pool. +func BenchmarkFuturePromotion100NoAuctioneer(b *testing.B) { + benchmarkFuturePromotionNoAuctioneer(b, 100) +} +func BenchmarkFuturePromotion1000NoAuctioneer(b *testing.B) { + benchmarkFuturePromotionNoAuctioneer(b, 1000) +} +func BenchmarkFuturePromotion10000NoAuctioneer(b *testing.B) { + benchmarkFuturePromotionNoAuctioneer(b, 10000) +} + +func benchmarkFuturePromotionNoAuctioneer(b *testing.B, size int) { + // Add a batch of transactions to a pool one by one + pool, key := setupPool(false) + defer pool.Close() + + account := crypto.PubkeyToAddress(key.PublicKey) + testAddBalance(pool, account, big.NewInt(1000000)) + + for i := 0; i < size; i++ { + tx := transaction(uint64(1+i), 100000, key) + pool.enqueueTx(tx.Hash(), tx, false, true) + } + // Benchmark the speed of pool validation + b.ResetTimer() + for i := 0; i < b.N; i++ { + pool.promoteExecutables(nil) + } +} + +// Benchmarks the speed of batched transaction insertion. 
+func BenchmarkBatchInsert100NoAuctioneer(b *testing.B) { + benchmarkBatchInsertNoAuctioneer(b, 100, false) +} +func BenchmarkBatchInsert1000NoAuctioneer(b *testing.B) { + benchmarkBatchInsertNoAuctioneer(b, 1000, false) +} +func BenchmarkBatchInsert10000NoAuctioneer(b *testing.B) { + benchmarkBatchInsertNoAuctioneer(b, 10000, false) +} + +func BenchmarkBatchLocalInsert100NoAuctioneer(b *testing.B) { + benchmarkBatchInsertNoAuctioneer(b, 100, true) +} +func BenchmarkBatchLocalInsert1000NoAuctioneer(b *testing.B) { + benchmarkBatchInsertNoAuctioneer(b, 1000, true) +} +func BenchmarkBatchLocalInsert10000NoAuctioneer(b *testing.B) { + benchmarkBatchInsertNoAuctioneer(b, 10000, true) +} + +func benchmarkBatchInsertNoAuctioneer(b *testing.B, size int, local bool) { + // Generate a batch of transactions to enqueue into the pool + pool, key := setupPool(false) + defer pool.Close() + + account := crypto.PubkeyToAddress(key.PublicKey) + testAddBalance(pool, account, big.NewInt(1000000000000000000)) + + batches := make([]types.Transactions, b.N) + for i := 0; i < b.N; i++ { + batches[i] = make(types.Transactions, size) + for j := 0; j < size; j++ { + batches[i][j] = transaction(uint64(size*i+j), 100000, key) + } + } + // Benchmark importing the transactions into the queue + b.ResetTimer() + for _, batch := range batches { + if local { + pool.addLocals(batch) + } else { + pool.addRemotes(batch) + } + } +} + +func BenchmarkInsertRemoteWithAllLocalsNoAuctioneer(b *testing.B) { + // Allocate keys for testing + key, _ := crypto.GenerateKey() + account := crypto.PubkeyToAddress(key.PublicKey) + + remoteKey, _ := crypto.GenerateKey() + remoteAddr := crypto.PubkeyToAddress(remoteKey.PublicKey) + + locals := make([]*types.Transaction, 4096+1024) // Occupy all slots + for i := 0; i < len(locals); i++ { + locals[i] = transaction(uint64(i), 100000, key) + } + remotes := make([]*types.Transaction, 1000) + for i := 0; i < len(remotes); i++ { + remotes[i] = pricedTransaction(uint64(i), 100000, big.NewInt(2), remoteKey) // Higher gasprice + } + // Benchmark importing the transactions into the queue + b.ResetTimer() + for i := 0; i < b.N; i++ { + b.StopTimer() + pool, _ := setupPool(false) + testAddBalance(pool, account, big.NewInt(100000000)) + for _, local := range locals { + pool.addLocal(local) + } + b.StartTimer() + // Assign a high enough balance for testing + testAddBalance(pool, remoteAddr, big.NewInt(100000000)) + for i := 0; i < len(remotes); i++ { + pool.addRemotes([]*types.Transaction{remotes[i]}) + } + pool.Close() + } +} + +// Benchmarks the speed of batch transaction insertion in case of multiple accounts. 
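+// Key generation, funding and signing all happen before the benchmark timer is
+// reset, so the timed loop below measures only addRemotesSync, one transaction
+// per distinct account.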
+func BenchmarkMultiAccountBatchInsertNoAuctioneer(b *testing.B) { + // Generate a batch of transactions to enqueue into the pool + pool, _ := setupPool(false) + defer pool.Close() + b.ReportAllocs() + batches := make(types.Transactions, b.N) + for i := 0; i < b.N; i++ { + key, _ := crypto.GenerateKey() + account := crypto.PubkeyToAddress(key.PublicKey) + pool.currentState.AddBalance(account, uint256.NewInt(1000000), tracing.BalanceChangeUnspecified) + tx := transaction(uint64(0), 100000, key) + batches[i] = tx + } + // Benchmark importing the transactions into the queue + b.ResetTimer() + for _, tx := range batches { + pool.addRemotesSync([]*types.Transaction{tx}) + } +} diff --git a/core/txpool/legacypool/legacypool_test.go b/core/txpool/legacypool/legacypool_test.go index c3bf450a9..00620ec68 100644 --- a/core/txpool/legacypool/legacypool_test.go +++ b/core/txpool/legacypool/legacypool_test.go @@ -159,16 +159,16 @@ func makeAddressReserver() txpool.AddressReserver { } } -func setupPool() (*LegacyPool, *ecdsa.PrivateKey) { - return setupPoolWithConfig(params.TestChainConfig) +func setupPool(auctioneerEnabled bool) (*LegacyPool, *ecdsa.PrivateKey) { + return setupPoolWithConfig(params.TestChainConfig, auctioneerEnabled) } -func setupPoolWithConfig(config *params.ChainConfig) (*LegacyPool, *ecdsa.PrivateKey) { +func setupPoolWithConfig(config *params.ChainConfig, auctioneerEnabled bool) (*LegacyPool, *ecdsa.PrivateKey) { statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) blockchain := newTestBlockChain(config, 10000000, statedb, new(event.Feed)) key, _ := crypto.GenerateKey() - pool := New(testTxPoolConfig, blockchain, true) + pool := New(testTxPoolConfig, blockchain, auctioneerEnabled) if err := pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()); err != nil { panic(err) } @@ -341,7 +341,7 @@ func testSetNonce(pool *LegacyPool, addr common.Address, nonce uint64) { func TestInvalidTransactions(t *testing.T) { t.Parallel() - pool, key := setupPool() + pool, key := setupPool(true) defer pool.Close() tx := transaction(0, 100, key) @@ -379,7 +379,7 @@ func TestInvalidTransactions(t *testing.T) { func TestQueue(t *testing.T) { t.Parallel() - pool, key := setupPool() + pool, key := setupPool(true) defer pool.Close() tx := transaction(0, 100, key) @@ -410,7 +410,7 @@ func TestQueue(t *testing.T) { func TestQueue2(t *testing.T) { t.Parallel() - pool, key := setupPool() + pool, key := setupPool(true) defer pool.Close() tx1 := transaction(0, 100, key) @@ -436,7 +436,7 @@ func TestQueue2(t *testing.T) { func TestNegativeValue(t *testing.T) { t.Parallel() - pool, key := setupPool() + pool, key := setupPool(true) defer pool.Close() tx, _ := types.SignTx(types.NewTransaction(0, common.Address{}, big.NewInt(-1), 100, big.NewInt(1), nil), types.HomesteadSigner{}, key) @@ -450,7 +450,7 @@ func TestNegativeValue(t *testing.T) { func TestTipAboveFeeCap(t *testing.T) { t.Parallel() - pool, key := setupPoolWithConfig(eip1559Config) + pool, key := setupPoolWithConfig(eip1559Config, true) defer pool.Close() tx := dynamicFeeTx(0, 100, big.NewInt(1), big.NewInt(2), key) @@ -463,7 +463,7 @@ func TestTipAboveFeeCap(t *testing.T) { func TestVeryHighValues(t *testing.T) { t.Parallel() - pool, key := setupPoolWithConfig(eip1559Config) + pool, key := setupPoolWithConfig(eip1559Config, true) defer pool.Close() veryBigNumber := big.NewInt(1) @@ -483,7 +483,7 @@ func TestVeryHighValues(t *testing.T) { func TestChainFork(t *testing.T) { 
t.Parallel() - pool, key := setupPool() + pool, key := setupPool(true) defer pool.Close() addr := crypto.PubkeyToAddress(key.PublicKey) @@ -512,7 +512,7 @@ func TestChainFork(t *testing.T) { func TestRemoveTxSanity(t *testing.T) { t.Parallel() - pool, key := setupPool() + pool, key := setupPool(true) defer pool.Close() addr := crypto.PubkeyToAddress(key.PublicKey) @@ -573,7 +573,7 @@ func TestRemoveTxSanity(t *testing.T) { func TestDoubleNonce(t *testing.T) { t.Parallel() - pool, key := setupPool() + pool, key := setupPool(true) defer pool.Close() addr := crypto.PubkeyToAddress(key.PublicKey) @@ -624,7 +624,7 @@ func TestDoubleNonce(t *testing.T) { func TestMissingNonce(t *testing.T) { t.Parallel() - pool, key := setupPool() + pool, key := setupPool(true) defer pool.Close() addr := crypto.PubkeyToAddress(key.PublicKey) @@ -648,7 +648,7 @@ func TestNonceRecovery(t *testing.T) { t.Parallel() const n = 10 - pool, key := setupPool() + pool, key := setupPool(true) defer pool.Close() addr := crypto.PubkeyToAddress(key.PublicKey) @@ -674,7 +674,7 @@ func TestDropping(t *testing.T) { t.Parallel() // Create a test account and fund it - pool, key := setupPool() + pool, key := setupPool(true) defer pool.Close() account := crypto.PubkeyToAddress(key.PublicKey) @@ -837,7 +837,7 @@ func TestGapFilling(t *testing.T) { t.Parallel() // Create a test account and fund it - pool, key := setupPool() + pool, key := setupPool(true) defer pool.Close() account := crypto.PubkeyToAddress(key.PublicKey) @@ -891,7 +891,7 @@ func TestQueueAccountLimiting(t *testing.T) { t.Parallel() // Create a test account and fund it - pool, key := setupPool() + pool, key := setupPool(true) defer pool.Close() account := crypto.PubkeyToAddress(key.PublicKey) @@ -920,96 +920,6 @@ func TestQueueAccountLimiting(t *testing.T) { } } -// Tests that if the transaction count belonging to multiple accounts go above -// some threshold, the higher transactions are dropped to prevent DOS attacks. -// -// This logic should not hold for local transactions, unless the local tracking -// mechanism is disabled. 
-func TestQueueGlobalLimiting(t *testing.T) { - testQueueGlobalLimiting(t, false) -} -func TestQueueGlobalLimitingNoLocals(t *testing.T) { - testQueueGlobalLimiting(t, true) -} - -func testQueueGlobalLimiting(t *testing.T, nolocals bool) { - t.Parallel() - - // Create the pool to test the limit enforcement with - statedb, _ := state.New(types.EmptyRootHash, state.NewDatabase(rawdb.NewMemoryDatabase()), nil) - blockchain := newTestBlockChain(params.TestChainConfig, 1000000, statedb, new(event.Feed)) - - config := testTxPoolConfig - config.NoLocals = nolocals - config.GlobalQueue = config.AccountQueue*3 - 1 // reduce the queue limits to shorten test time (-1 to make it non divisible) - - pool := New(config, blockchain, true) - pool.Init(testTxPoolConfig.PriceLimit, blockchain.CurrentBlock(), makeAddressReserver()) - defer pool.Close() - - // Create a number of test accounts and fund them (last one will be the local) - keys := make([]*ecdsa.PrivateKey, 5) - for i := 0; i < len(keys); i++ { - keys[i], _ = crypto.GenerateKey() - testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000)) - } - local := keys[len(keys)-1] - - // Generate and queue a batch of transactions - nonces := make(map[common.Address]uint64) - - txs := make(types.Transactions, 0, 3*config.GlobalQueue) - for len(txs) < cap(txs) { - key := keys[rand.Intn(len(keys)-1)] // skip adding transactions with the local account - addr := crypto.PubkeyToAddress(key.PublicKey) - - txs = append(txs, transaction(nonces[addr]+1, 100000, key)) - nonces[addr]++ - } - // Import the batch and verify that limits have been enforced - pool.addRemotesSync(txs) - - queued := 0 - for addr, list := range pool.queue { - if list.Len() > int(config.AccountQueue) { - t.Errorf("addr %x: queued accounts overflown allowance: %d > %d", addr, list.Len(), config.AccountQueue) - } - queued += list.Len() - } - if queued > int(config.GlobalQueue) { - t.Fatalf("total transactions overflow allowance: %d > %d", queued, config.GlobalQueue) - } - // Generate a batch of transactions from the local account and import them - txs = txs[:0] - for i := uint64(0); i < 3*config.GlobalQueue; i++ { - txs = append(txs, transaction(i+1, 100000, local)) - } - pool.addLocals(txs) - - // If locals are disabled, the previous eviction algorithm should apply here too - if nolocals { - queued := 0 - for addr, list := range pool.queue { - if list.Len() > int(config.AccountQueue) { - t.Errorf("addr %x: queued accounts overflown allowance: %d > %d", addr, list.Len(), config.AccountQueue) - } - queued += list.Len() - } - if queued > int(config.GlobalQueue) { - t.Fatalf("total transactions overflow allowance: %d > %d", queued, config.GlobalQueue) - } - } else { - // Local exemptions are enabled, make sure the local account owned the queue - if len(pool.queue) != 1 { - t.Errorf("multiple accounts in queue: have %v, want %v", len(pool.queue), 1) - } - // Also ensure no local transactions are ever dropped, even if above global limits - if queued := pool.queue[crypto.PubkeyToAddress(local.PublicKey)].Len(); uint64(queued) != 3*config.GlobalQueue { - t.Fatalf("local account queued transaction count mismatch: have %v, want %v", queued, 3*config.GlobalQueue) - } - } -} - // Tests that if an account remains idle for a prolonged amount of time, any // non-executable transactions queued up are dropped to prevent wasting resources // on shuffling them around. 
@@ -1142,7 +1052,7 @@ func TestPendingLimiting(t *testing.T) { t.Parallel() // Create a test account and fund it - pool, key := setupPool() + pool, key := setupPool(true) defer pool.Close() account := crypto.PubkeyToAddress(key.PublicKey) @@ -1232,7 +1142,7 @@ func TestAllowedTxSize(t *testing.T) { t.Parallel() // Create a test account and fund it - pool, key := setupPool() + pool, key := setupPool(true) defer pool.Close() account := crypto.PubkeyToAddress(key.PublicKey) @@ -1538,7 +1448,7 @@ func TestRepricingDynamicFee(t *testing.T) { t.Parallel() // Create the pool to test the pricing enforcement with - pool, _ := setupPoolWithConfig(eip1559Config) + pool, _ := setupPoolWithConfig(eip1559Config, true) defer pool.Close() // Keep track of transaction events to ensure all executables get announced @@ -1920,7 +1830,7 @@ func TestStableUnderpricing(t *testing.T) { func TestUnderpricingDynamicFee(t *testing.T) { t.Parallel() - pool, _ := setupPoolWithConfig(eip1559Config) + pool, _ := setupPoolWithConfig(eip1559Config, true) defer pool.Close() pool.config.GlobalSlots = 2 @@ -2027,7 +1937,7 @@ func TestUnderpricingDynamicFee(t *testing.T) { func TestDualHeapEviction(t *testing.T) { t.Parallel() - pool, _ := setupPoolWithConfig(eip1559Config) + pool, _ := setupPoolWithConfig(eip1559Config, true) defer pool.Close() pool.config.GlobalSlots = 10 @@ -2232,7 +2142,7 @@ func TestReplacementDynamicFee(t *testing.T) { t.Parallel() // Create the pool to test the pricing enforcement with - pool, key := setupPoolWithConfig(eip1559Config) + pool, key := setupPoolWithConfig(eip1559Config, true) defer pool.Close() testAddBalance(pool, crypto.PubkeyToAddress(key.PublicKey), big.NewInt(1000000000)) @@ -2338,7 +2248,6 @@ func TestReplacementDynamicFee(t *testing.T) { // Tests that local transactions are journaled to disk, but remote transactions // get discarded between restarts. -// TODO - fix this func TestJournaling(t *testing.T) { testJournaling(t, false) } func TestJournalingNoLocals(t *testing.T) { testJournaling(t, true) } @@ -2546,7 +2455,7 @@ func BenchmarkPendingDemotion10000(b *testing.B) { benchmarkPendingDemotion(b, 1 func benchmarkPendingDemotion(b *testing.B, size int) { // Add a batch of transactions to a pool one by one - pool, key := setupPool() + pool, key := setupPool(true) defer pool.Close() account := crypto.PubkeyToAddress(key.PublicKey) @@ -2571,7 +2480,7 @@ func BenchmarkFuturePromotion10000(b *testing.B) { benchmarkFuturePromotion(b, 1 func benchmarkFuturePromotion(b *testing.B, size int) { // Add a batch of transactions to a pool one by one - pool, key := setupPool() + pool, key := setupPool(true) defer pool.Close() account := crypto.PubkeyToAddress(key.PublicKey) @@ -2599,7 +2508,7 @@ func BenchmarkBatchLocalInsert10000(b *testing.B) { benchmarkBatchInsert(b, 1000 func benchmarkBatchInsert(b *testing.B, size int, local bool) { // Generate a batch of transactions to enqueue into the pool - pool, key := setupPool() + pool, key := setupPool(true) defer pool.Close() account := crypto.PubkeyToAddress(key.PublicKey) @@ -2643,7 +2552,7 @@ func BenchmarkInsertRemoteWithAllLocals(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { b.StopTimer() - pool, _ := setupPool() + pool, _ := setupPool(true) testAddBalance(pool, account, big.NewInt(100000000)) for _, local := range locals { pool.addLocal(local) @@ -2661,7 +2570,7 @@ func BenchmarkInsertRemoteWithAllLocals(b *testing.B) { // Benchmarks the speed of batch transaction insertion in case of multiple accounts. 
func BenchmarkMultiAccountBatchInsert(b *testing.B) { // Generate a batch of transactions to enqueue into the pool - pool, _ := setupPool() + pool, _ := setupPool(true) defer pool.Close() b.ReportAllocs() batches := make(types.Transactions, b.N) From 61af0ecfc1070b923c7c6b459623fc789fa5bf31 Mon Sep 17 00:00:00 2001 From: Bharath Date: Wed, 13 Nov 2024 18:03:46 +0530 Subject: [PATCH 55/79] unmarshall auction result --- go.mod | 4 +- go.sum | 4 + grpc/execution/server.go | 12 +- grpc/optimistic/server.go | 20 +-- grpc/shared/validation.go | 217 +++++++++++++++-------- grpc/shared/validation_test.go | 306 +++++++++++++++++++++++++++------ 6 files changed, 418 insertions(+), 145 deletions(-) diff --git a/go.mod b/go.mod index 1055cd47c..946712d50 100644 --- a/go.mod +++ b/go.mod @@ -3,8 +3,8 @@ module github.com/ethereum/go-ethereum go 1.21 require ( - buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-20241017141511-7e4bcc0ebba5.1 - buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.35.1-20241017141511-7e4bcc0ebba5.1 + buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-cc31a327d543.1 + buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.35.1-00000000000000-cc31a327d543.1 buf.build/gen/go/astria/primitives/protocolbuffers/go v1.35.1-20240911152449-eeebd3decdce.1 buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.35.1-20241017141511-71aab1871615.1 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 diff --git a/go.sum b/go.sum index 83a47af38..a5c55efb7 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,9 @@ +buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-cc31a327d543.1 h1:wOry49zAbse0G4mt2tFTwa4P2AUMuYCR/0mYcPrpcbs= +buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-cc31a327d543.1/go.mod h1:+pVCkEpJNp2JtooS8NiydT7bO9+hu11XUZ5Z47DPtXo= buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-20241017141511-7e4bcc0ebba5.1 h1:v7QnrDjNmG7I/0aqZdtlP3cBPQGd62w4AYVF8TfAcHM= buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-20241017141511-7e4bcc0ebba5.1/go.mod h1:T5EsLvEE5UMk62gVSwNY/7XlxknAP3sL8tYRsU68b4s= +buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.35.1-00000000000000-cc31a327d543.1 h1:VkPk2LvyNK8NF9WmAnodrwgQZ3JiYAHFEmPKXUtlX4E= +buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.35.1-00000000000000-cc31a327d543.1/go.mod h1:xzRLiRun3wTzhd+oBg9VkXi/c4PhjBjj73+2vSMH5eM= buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.35.1-20241017141511-7e4bcc0ebba5.1 h1:3G2O21DuY5Y/G32tP1mAI16AxwDYTscG2YaOb/WQty0= buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.35.1-20241017141511-7e4bcc0ebba5.1/go.mod h1:U4LUlabiYNYBd1pqYS9o8SsHjBRoEBysrfRVnebzJH0= buf.build/gen/go/astria/primitives/protocolbuffers/go v1.35.1-20240911152449-eeebd3decdce.1 h1:kG4riHqlF9X6iZ1Oxs5/6ul6aue7MS+A6DK6HAchuTk= diff --git a/grpc/execution/server.go b/grpc/execution/server.go index 8f65765d6..3dcf39345 100644 --- a/grpc/execution/server.go +++ b/grpc/execution/server.go @@ -170,14 +170,10 @@ func (s *ExecutionServiceServerV1) ExecuteBlock(ctx context.Context, req *astria // the height that this block will be at height := s.Bc().CurrentBlock().Number.Uint64() + 1 - txsToProcess := types.Transactions{} - for _, tx := range req.Transactions { - unmarshalledTx, err := shared.ValidateAndUnmarshalSequencerTx(height, tx, s.BridgeAddresses(), s.BridgeAllowedAssets()) - if err != nil { - log.Debug("failed to validate sequencer tx, ignoring", "tx", tx, "err", err) - continue - } - txsToProcess = 
append(txsToProcess, unmarshalledTx) + txsToProcess, err := shared.UnbundleRollupData(req.Transactions, height, s.BridgeAddresses(), s.BridgeAllowedAssets(), prevHeadHash.Bytes()) + if err != nil { + log.Error("failed to unbundle rollup data", "err", err) + return nil, status.Error(codes.InvalidArgument, "Could not unbundle rollup data") } // This set of ordered TXs on the TxPool is has been configured to be used by diff --git a/grpc/optimistic/server.go b/grpc/optimistic/server.go index d96e6a876..1d58db000 100644 --- a/grpc/optimistic/server.go +++ b/grpc/optimistic/server.go @@ -11,7 +11,6 @@ import ( "github.com/ethereum/go-ethereum/common" cmath "github.com/ethereum/go-ethereum/common/math" "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/grpc/shared" "github.com/ethereum/go-ethereum/log" @@ -174,21 +173,10 @@ func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlock(ctx context.Context, // the height that this block will be at height := o.Bc().CurrentBlock().Number.Uint64() + 1 - txsToProcess := types.Transactions{} - for _, tx := range req.Transactions { - unmarshalledTx, err := shared.ValidateAndUnmarshalSequencerTx(height, tx, o.BridgeAddresses(), o.BridgeAllowedAssets()) - if err != nil { - log.Debug("failed to validate sequencer tx, ignoring", "tx", tx, "err", err) - continue - } - - err = o.Eth().TxPool().ValidateTx(unmarshalledTx) - if err != nil { - log.Debug("failed to validate tx, ignoring", "tx", tx, "err", err) - continue - } - - txsToProcess = append(txsToProcess, unmarshalledTx) + txsToProcess, err := shared.UnbundleRollupData(req.Transactions, height, o.BridgeAddresses(), o.BridgeAllowedAssets(), softBlock.Hash().Bytes()) + if err != nil { + log.Error("failed to unbundle rollup data", "err", err) + return nil, status.Error(codes.InvalidArgument, "Could not unbundle rollup data") } // Build a payload to add to the chain diff --git a/grpc/shared/validation.go b/grpc/shared/validation.go index ccb0a9961..414e9477e 100644 --- a/grpc/shared/validation.go +++ b/grpc/shared/validation.go @@ -1,8 +1,10 @@ package shared import ( + bundlev1alpha1 "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/bundle/v1alpha1" primitivev1 "buf.build/gen/go/astria/primitives/protocolbuffers/go/astria/primitive/v1" sequencerblockv1 "buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go/astria/sequencerblock/v1" + "bytes" "crypto/sha256" "fmt" "github.com/ethereum/go-ethereum/common" @@ -10,6 +12,8 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" + "github.com/golang/protobuf/proto" + "github.com/pkg/errors" "math/big" ) @@ -20,95 +24,166 @@ func protoU128ToBigInt(u128 *primitivev1.Uint128) *big.Int { return lo.Add(lo, hi) } -// `validateAndUnmarshalSequencerTx` validates and unmarshals the given rollup sequencer transaction. -// If the sequencer transaction is a deposit tx, we ensure that the asset ID is allowed and the bridge address is known. -// If the sequencer transaction is not a deposit tx, we unmarshal the sequenced data into an Ethereum transaction. We ensure that the -// tx is not a blob tx or a deposit tx. 
-func ValidateAndUnmarshalSequencerTx( +func validateAndUnmarshalDepositTx( + deposit *sequencerblockv1.Deposit, height uint64, - tx *sequencerblockv1.RollupData, bridgeAddresses map[string]*params.AstriaBridgeAddressConfig, - bridgeAllowedAssets map[string]struct{}, -) (*types.Transaction, error) { - if deposit := tx.GetDeposit(); deposit != nil { - bridgeAddress := deposit.BridgeAddress.GetBech32M() - bac, ok := bridgeAddresses[bridgeAddress] - if !ok { - return nil, fmt.Errorf("unknown bridge address: %s", bridgeAddress) - } - - if height < uint64(bac.StartHeight) { - return nil, fmt.Errorf("bridging asset %s from bridge %s not allowed before height %d", bac.AssetDenom, bridgeAddress, bac.StartHeight) - } - - if _, ok := bridgeAllowedAssets[deposit.Asset]; !ok { - return nil, fmt.Errorf("disallowed asset %s in deposit tx", deposit.Asset) - } + bridgeAllowedAssets map[string]struct{}) (*types.Transaction, error) { + bridgeAddress := deposit.BridgeAddress.GetBech32M() + bac, ok := bridgeAddresses[bridgeAddress] + if !ok { + return nil, fmt.Errorf("unknown bridge address: %s", bridgeAddress) + } - if deposit.Asset != bac.AssetDenom { - return nil, fmt.Errorf("asset %s does not match bridge address %s asset", deposit.Asset, bridgeAddress) - } + if height < uint64(bac.StartHeight) { + return nil, fmt.Errorf("bridging asset %s from bridge %s not allowed before height %d", bac.AssetDenom, bridgeAddress, bac.StartHeight) + } - recipient := common.HexToAddress(deposit.DestinationChainAddress) - amount := bac.ScaledDepositAmount(protoU128ToBigInt(deposit.Amount)) + if _, ok := bridgeAllowedAssets[deposit.Asset]; !ok { + return nil, fmt.Errorf("disallowed asset %s in deposit tx", deposit.Asset) + } - if bac.Erc20Asset != nil { - log.Debug("creating deposit tx to mint ERC20 asset", "token", bac.AssetDenom, "erc20Address", bac.Erc20Asset.ContractAddress) - abi, err := contracts.AstriaBridgeableERC20MetaData.GetAbi() - if err != nil { - // this should never happen, as the abi is hardcoded in the contract bindings - return nil, fmt.Errorf("failed to get abi for erc20 contract for asset %s: %w", bac.AssetDenom, err) - } + if deposit.Asset != bac.AssetDenom { + return nil, fmt.Errorf("asset %s does not match bridge address %s asset", deposit.Asset, bridgeAddress) + } - // pack arguments for calling the `mint` function on the ERC20 contract - args := []interface{}{recipient, amount} - calldata, err := abi.Pack("mint", args...) - if err != nil { - return nil, err - } + recipient := common.HexToAddress(deposit.DestinationChainAddress) + amount := bac.ScaledDepositAmount(protoU128ToBigInt(deposit.Amount)) - txdata := types.DepositTx{ - From: bac.SenderAddress, - Value: new(big.Int), // don't need to set this, as we aren't minting the native asset - // mints cost ~14k gas, however this can vary based on existing storage, so we add a little extra as buffer. - // - // the fees are spent from the "bridge account" which is not actually a real account, but is instead some - // address defined by consensus, so the gas cost is not actually deducted from any account. 
- Gas: 64000, - To: &bac.Erc20Asset.ContractAddress, - Data: calldata, - SourceTransactionId: *deposit.SourceTransactionId, - SourceTransactionIndex: deposit.SourceActionIndex, - } + if bac.Erc20Asset != nil { + log.Debug("creating deposit tx to mint ERC20 asset", "token", bac.AssetDenom, "erc20Address", bac.Erc20Asset.ContractAddress) + abi, err := contracts.AstriaBridgeableERC20MetaData.GetAbi() + if err != nil { + // this should never happen, as the abi is hardcoded in the contract bindings + return nil, fmt.Errorf("failed to get abi for erc20 contract for asset %s: %w", bac.AssetDenom, err) + } - tx := types.NewTx(&txdata) - return tx, nil + // pack arguments for calling the `mint` function on the ERC20 contract + args := []interface{}{recipient, amount} + calldata, err := abi.Pack("mint", args...) + if err != nil { + return nil, err } txdata := types.DepositTx{ - From: bac.SenderAddress, - To: &recipient, - Value: amount, - Gas: 0, + From: bac.SenderAddress, + Value: new(big.Int), // don't need to set this, as we aren't minting the native asset + // mints cost ~14k gas, however this can vary based on existing storage, so we add a little extra as buffer. + // + // the fees are spent from the "bridge account" which is not actually a real account, but is instead some + // address defined by consensus, so the gas cost is not actually deducted from any account. + Gas: 64000, + To: &bac.Erc20Asset.ContractAddress, + Data: calldata, SourceTransactionId: *deposit.SourceTransactionId, SourceTransactionIndex: deposit.SourceActionIndex, } - return types.NewTx(&txdata), nil - } else { - ethTx := new(types.Transaction) - err := ethTx.UnmarshalBinary(tx.GetSequencedData()) + + tx := types.NewTx(&txdata) + return tx, nil + } + + txdata := types.DepositTx{ + From: bac.SenderAddress, + To: &recipient, + Value: amount, + Gas: 0, + SourceTransactionId: *deposit.SourceTransactionId, + SourceTransactionIndex: deposit.SourceActionIndex, + } + return types.NewTx(&txdata), nil +} + +func validateAndUnmarshallSequenceAction(tx *sequencerblockv1.RollupData) (*types.Transaction, error) { + ethTx := new(types.Transaction) + err := ethTx.UnmarshalBinary(tx.GetSequencedData()) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal sequenced data into transaction: %w. tx hash: %s", err, sha256.Sum256(tx.GetSequencedData())) + } + + if ethTx.Type() == types.DepositTxType { + return nil, fmt.Errorf("deposit tx not allowed in sequenced data. tx hash: %s", sha256.Sum256(tx.GetSequencedData())) + } + + if ethTx.Type() == types.BlobTxType { + return nil, fmt.Errorf("blob tx not allowed in sequenced data. tx hash: %s", sha256.Sum256(tx.GetSequencedData())) + } + + return ethTx, nil +} + +func unmarshallAuctionResultTxs(auctionResult *bundlev1alpha1.AuctionResult, prevBlockHash []byte) (types.Transactions, error) { + processedTxs := types.Transactions{} + allocation := auctionResult.GetAllocation() + + if !bytes.Equal(allocation.PrevRollupBlockHash, prevBlockHash) { + return nil, errors.New("prev block hash do not match in allocation") + } + + // TODO - validate the signature and public key + + // unmarshall the transactions in the bundle + for _, allocationTx := range allocation.GetTransactions() { + ethtx := new(types.Transaction) + err := ethtx.UnmarshalBinary(allocationTx) if err != nil { - return nil, fmt.Errorf("failed to unmarshal sequenced data into transaction: %w. 
tx hash: %s", err, sha256.Sum256(tx.GetSequencedData())) + return nil, errors.Wrap(err, "failed to unmarshall allocation transaction") } + processedTxs = append(processedTxs, ethtx) + } - if ethTx.Type() == types.DepositTxType { - return nil, fmt.Errorf("deposit tx not allowed in sequenced data. tx hash: %s", sha256.Sum256(tx.GetSequencedData())) - } + return processedTxs, nil - if ethTx.Type() == types.BlobTxType { - return nil, fmt.Errorf("blob tx not allowed in sequenced data. tx hash: %s", sha256.Sum256(tx.GetSequencedData())) - } +} + +// `UnbundleRollupData` takes in a list of rollup data transactions and returns a list of Ethereum transactions. +func UnbundleRollupData(txs []*sequencerblockv1.RollupData, height uint64, bridgeAddresses map[string]*params.AstriaBridgeAddressConfig, + bridgeAllowedAssets map[string]struct{}, prevBlockHash []byte) (types.Transactions, error) { + processedTxs := types.Transactions{} + auctionResultTxs := types.Transactions{} + // we just return the auction result here and do not unmarshall the transactions in the bundle if we find it + var auctionResult *bundlev1alpha1.AuctionResult + for _, tx := range txs { + if deposit := tx.GetDeposit(); deposit != nil { + depositTx, err := validateAndUnmarshalDepositTx(deposit, height, bridgeAddresses, bridgeAllowedAssets) + if err != nil { + return nil, errors.Wrap(err, "failed to validate and unmarshal deposit tx") + } + + processedTxs = append(processedTxs, depositTx) + } else { + sequenceData := tx.GetSequencedData() + // check if sequence data is of type AuctionResult + if auctionResult == nil { + tempAuctionResult := &bundlev1alpha1.AuctionResult{} + err := proto.Unmarshal(sequenceData, tempAuctionResult) + if err == nil { + unmarshalledAuctionResultTxs, err := unmarshallAuctionResultTxs(tempAuctionResult, prevBlockHash) + if err != nil { + return nil, errors.Wrap(err, "failed to unmarshall auction result transactions") + } - return ethTx, nil + auctionResult = tempAuctionResult + auctionResultTxs = unmarshalledAuctionResultTxs + } else { + ethtx, err := validateAndUnmarshallSequenceAction(tx) + if err != nil { + return nil, errors.Wrap(err, "failed to unmarshall sequence action") + } + processedTxs = append(processedTxs, ethtx) + } + } else { + ethtx, err := validateAndUnmarshallSequenceAction(tx) + if err != nil { + return nil, errors.Wrap(err, "failed to unmarshall sequence action") + } + processedTxs = append(processedTxs, ethtx) + } + } } + + // prepend auctionResultTxs to processedTxs + processedTxs = append(auctionResultTxs, processedTxs...) 
+ + return processedTxs, nil } diff --git a/grpc/shared/validation_test.go b/grpc/shared/validation_test.go index a46032f4e..61b25dc58 100644 --- a/grpc/shared/validation_test.go +++ b/grpc/shared/validation_test.go @@ -1,6 +1,10 @@ package shared import ( + bundlev1alpha1 "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/bundle/v1alpha1" + "bytes" + "crypto/ecdsa" + "github.com/golang/protobuf/proto" "math/big" "testing" @@ -15,6 +19,15 @@ import ( "github.com/stretchr/testify/require" ) +func transaction(nonce uint64, gaslimit uint64, key *ecdsa.PrivateKey) *types.Transaction { + return pricedTransaction(nonce, gaslimit, big.NewInt(1), key) +} + +func pricedTransaction(nonce uint64, gaslimit uint64, gasprice *big.Int, key *ecdsa.PrivateKey) *types.Transaction { + tx, _ := types.SignTx(types.NewTransaction(nonce, common.Address{}, big.NewInt(100), gaslimit, gasprice, nil), types.HomesteadSigner{}, key) + return tx +} + func bigIntToProtoU128(i *big.Int) *primitivev1.Uint128 { lo := i.Uint64() hi := new(big.Int).Rsh(i, 64).Uint64() @@ -58,21 +71,102 @@ func generateBech32MAddress() string { return bech32m } -func TestSequenceTxValidation(t *testing.T) { - ethservice, serviceV1Alpha1 := SetupSharedService(t, 10) +func TestUnmarshallAuctionResultTxs(t *testing.T) { + tx1 := transaction(0, 1000, TestKey) + validMarshalledTx1, err := tx1.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) - blobTx, err := testBlobTx().MarshalBinary() - require.Nil(t, err, "failed to marshal random blob tx: %v", err) + tx2 := transaction(1, 1000, TestKey) + validMarshalledTx2, err := tx2.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) - depositTx, err := testDepositTx().MarshalBinary() - require.Nil(t, err, "failed to marshal random deposit tx: %v", err) + tx3 := transaction(2, 1000, TestKey) + validMarshalledTx3, err := tx3.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + + tests := []struct { + description string + auctionResult *bundlev1alpha1.AuctionResult + prevBlockHash []byte + expectedOutput types.Transactions + // just check if error contains the string since error contains other details + wantErr string + }{ + { + description: "previous block hash mismatch", + auctionResult: &bundlev1alpha1.AuctionResult{ + // TODO - add signature and public key validation + Signature: make([]byte, 0), + PublicKey: make([]byte, 0), + Allocation: &bundlev1alpha1.Bundle{ + Fee: 100, + Transactions: [][]byte{[]byte("unmarshallable tx")}, + BaseSequencerBlockHash: []byte("sequencer block hash"), + PrevRollupBlockHash: []byte("prev rollup block hash"), + }, + }, + prevBlockHash: []byte("not prev rollup block hash"), + expectedOutput: types.Transactions{}, + wantErr: "prev block hash do not match in allocation", + }, + { + description: "unmarshallable sequencer tx", + auctionResult: &bundlev1alpha1.AuctionResult{ + Signature: make([]byte, 0), + PublicKey: make([]byte, 0), + Allocation: &bundlev1alpha1.Bundle{ + Fee: 100, + Transactions: [][]byte{[]byte("unmarshallable tx")}, + BaseSequencerBlockHash: []byte("sequencer block hash"), + PrevRollupBlockHash: []byte("prev rollup block hash"), + }, + }, + prevBlockHash: []byte("prev rollup block hash"), + expectedOutput: types.Transactions{}, + wantErr: "failed to unmarshall allocation transaction", + }, + { + description: "valid auction result", + auctionResult: &bundlev1alpha1.AuctionResult{ + Signature: make([]byte, 0), + PublicKey: make([]byte, 0), + Allocation: 
&bundlev1alpha1.Bundle{ + Fee: 100, + Transactions: [][]byte{validMarshalledTx1, validMarshalledTx2, validMarshalledTx3}, + BaseSequencerBlockHash: []byte("sequencer block hash"), + PrevRollupBlockHash: []byte("prev rollup block hash"), + }, + }, + prevBlockHash: []byte("prev rollup block hash"), + expectedOutput: types.Transactions{tx1, tx2, tx3}, + wantErr: "", + }, + } - unsignedTx := types.NewTransaction(uint64(0), common.HexToAddress("0x9a9070028361F7AAbeB3f2F2Dc07F82C4a98A02a"), big.NewInt(1), params.TxGas, big.NewInt(params.InitialBaseFee*2), nil) - tx, err := types.SignTx(unsignedTx, types.LatestSigner(ethservice.BlockChain().Config()), TestKey) - require.Nil(t, err, "failed to sign tx: %v", err) + for _, test := range tests { + t.Run(test.description, func(t *testing.T) { + finalTxs, err := unmarshallAuctionResultTxs(test.auctionResult, test.prevBlockHash) + if test.wantErr == "" && err == nil { + for _, tx := range test.expectedOutput { + foundTx := false + for _, finalTx := range finalTxs { + if bytes.Equal(finalTx.Hash().Bytes(), tx.Hash().Bytes()) { + foundTx = true + } + } + + require.True(t, foundTx, "expected tx not found in final txs") + } + return + } + require.False(t, test.wantErr == "" && err != nil, "expected error, got nil") + require.Contains(t, err.Error(), test.wantErr) + }) + } +} - validMarshalledTx, err := tx.MarshalBinary() - require.Nil(t, err, "failed to marshal valid tx: %v", err) +func TestValidateAndUnmarshallDepositTx(t *testing.T) { + ethservice, serviceV1Alpha1 := SetupSharedService(t, 10) chainDestinationKey, err := crypto.GenerateKey() require.Nil(t, err, "failed to generate chain destination key: %v", err) @@ -92,40 +186,13 @@ func TestSequenceTxValidation(t *testing.T) { tests := []struct { description string - sequencerTx *sequencerblockv1.RollupData + sequencerTx *sequencerblockv1.Deposit // just check if error contains the string since error contains other details wantErr string }{ - { - description: "unmarshallable sequencer tx", - sequencerTx: &sequencerblockv1.RollupData{ - Value: &sequencerblockv1.RollupData_SequencedData{ - SequencedData: []byte("unmarshallable tx"), - }, - }, - wantErr: "failed to unmarshal sequenced data into transaction", - }, - { - description: "blob type sequence tx", - sequencerTx: &sequencerblockv1.RollupData{ - Value: &sequencerblockv1.RollupData_SequencedData{ - SequencedData: blobTx, - }, - }, - wantErr: "blob tx not allowed in sequenced data", - }, - { - description: "deposit type sequence tx", - sequencerTx: &sequencerblockv1.RollupData{ - Value: &sequencerblockv1.RollupData_SequencedData{ - SequencedData: depositTx, - }, - }, - wantErr: "deposit tx not allowed in sequenced data", - }, { description: "deposit tx with an unknown bridge address", - sequencerTx: &sequencerblockv1.RollupData{Value: &sequencerblockv1.RollupData_Deposit{Deposit: &sequencerblockv1.Deposit{ + sequencerTx: &sequencerblockv1.Deposit{ BridgeAddress: &primitivev1.Address{ Bech32M: generateBech32MAddress(), }, @@ -137,12 +204,12 @@ func TestSequenceTxValidation(t *testing.T) { Inner: "test_tx_hash", }, SourceActionIndex: 0, - }}}, + }, wantErr: "unknown bridge address", }, { description: "deposit tx with a disallowed asset id", - sequencerTx: &sequencerblockv1.RollupData{Value: &sequencerblockv1.RollupData_Deposit{Deposit: &sequencerblockv1.Deposit{ + sequencerTx: &sequencerblockv1.Deposit{ BridgeAddress: &primitivev1.Address{ Bech32M: bridgeAddress, }, @@ -154,12 +221,12 @@ func TestSequenceTxValidation(t *testing.T) { Inner: 
"test_tx_hash", }, SourceActionIndex: 0, - }}}, + }, wantErr: "disallowed asset", }, { description: "deposit tx with a height and asset below the bridge start height", - sequencerTx: &sequencerblockv1.RollupData{Value: &sequencerblockv1.RollupData_Deposit{Deposit: &sequencerblockv1.Deposit{ + sequencerTx: &sequencerblockv1.Deposit{ BridgeAddress: &primitivev1.Address{ Bech32M: invalidHeightBridgeAddressBech32m, }, @@ -171,12 +238,12 @@ func TestSequenceTxValidation(t *testing.T) { Inner: "test_tx_hash", }, SourceActionIndex: 0, - }}}, + }, wantErr: "not allowed before height", }, { description: "valid deposit tx", - sequencerTx: &sequencerblockv1.RollupData{Value: &sequencerblockv1.RollupData_Deposit{Deposit: &sequencerblockv1.Deposit{ + sequencerTx: &sequencerblockv1.Deposit{ BridgeAddress: &primitivev1.Address{ Bech32M: bridgeAddress, }, @@ -188,9 +255,67 @@ func TestSequenceTxValidation(t *testing.T) { Inner: "test_tx_hash", }, SourceActionIndex: 0, - }}}, + }, wantErr: "", }, + } + + for _, test := range tests { + t.Run(test.description, func(t *testing.T) { + _, err := validateAndUnmarshalDepositTx(test.sequencerTx, 2, serviceV1Alpha1.BridgeAddresses(), serviceV1Alpha1.BridgeAllowedAssets()) + if test.wantErr == "" && err == nil { + return + } + require.False(t, test.wantErr == "" && err != nil, "expected error, got nil") + require.Contains(t, err.Error(), test.wantErr) + }) + } +} + +func TestValidateAndUnmarshallSequenceAction(t *testing.T) { + blobTx, err := testBlobTx().MarshalBinary() + require.Nil(t, err, "failed to marshal random blob tx: %v", err) + + depositTx, err := testDepositTx().MarshalBinary() + require.Nil(t, err, "failed to marshal random deposit tx: %v", err) + + tx1 := transaction(0, 1000, TestKey) + validMarshalledTx, err := tx1.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + + tests := []struct { + description string + sequencerTx *sequencerblockv1.RollupData + // just check if error contains the string since errors can contains other details + wantErr string + }{ + { + description: "unmarshallable sequencer tx", + sequencerTx: &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{ + SequencedData: []byte("unmarshallable tx"), + }, + }, + wantErr: "failed to unmarshal sequenced data into transaction", + }, + { + description: "blob type sequence tx", + sequencerTx: &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{ + SequencedData: blobTx, + }, + }, + wantErr: "blob tx not allowed in sequenced data", + }, + { + description: "deposit type sequence tx", + sequencerTx: &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{ + SequencedData: depositTx, + }, + }, + wantErr: "deposit tx not allowed in sequenced data", + }, { description: "valid sequencer tx", sequencerTx: &sequencerblockv1.RollupData{ @@ -202,7 +327,7 @@ func TestSequenceTxValidation(t *testing.T) { for _, test := range tests { t.Run(test.description, func(t *testing.T) { - _, err := ValidateAndUnmarshalSequencerTx(2, test.sequencerTx, serviceV1Alpha1.BridgeAddresses(), serviceV1Alpha1.BridgeAllowedAssets()) + _, err := validateAndUnmarshallSequenceAction(test.sequencerTx) if test.wantErr == "" && err == nil { return } @@ -211,3 +336,88 @@ func TestSequenceTxValidation(t *testing.T) { }) } } + +func TestUnbundleRollupData(t *testing.T) { + ethservice, serviceV1Alpha1 := SetupSharedService(t, 10) + + baseSequencerBlockHash := []byte("sequencer block hash") + prevRollupBlockHash := 
[]byte("prev rollup block hash") + + // txs in + tx1 := transaction(0, 1000, TestKey) + tx2 := transaction(1, 1000, TestKey) + tx3 := transaction(2, 1000, TestKey) + tx4 := transaction(3, 1000, TestKey) + tx5 := transaction(4, 1000, TestKey) + + validMarshalledTx1, err := tx1.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + validMarshalledTx2, err := tx2.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + validMarshalledTx3, err := tx3.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + validMarshalledTx4, err := tx4.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + validMarshalledTx5, err := tx5.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + + auctionResult := &bundlev1alpha1.AuctionResult{ + Signature: make([]byte, 0), + PublicKey: make([]byte, 0), + Allocation: &bundlev1alpha1.Bundle{ + Fee: 100, + Transactions: [][]byte{validMarshalledTx1, validMarshalledTx2, validMarshalledTx3}, + BaseSequencerBlockHash: baseSequencerBlockHash, + PrevRollupBlockHash: prevRollupBlockHash, + }, + } + marshalledAuctionResult, err := proto.Marshal(auctionResult) + require.NoError(t, err, "failed to marshal auction result: %v", err) + auctionResultSequenceData := &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{ + SequencedData: marshalledAuctionResult, + }, + } + seqData1 := &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{ + SequencedData: validMarshalledTx4, + }, + } + seqData2 := &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{ + SequencedData: validMarshalledTx5, + }, + } + + bridgeAddress := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].BridgeAddress + bridgeAssetDenom := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].AssetDenom + chainDestinationKey, err := crypto.GenerateKey() + require.Nil(t, err, "failed to generate chain destination key: %v", err) + chainDestinationAddress := crypto.PubkeyToAddress(chainDestinationKey.PublicKey) + + depositTx := &sequencerblockv1.RollupData{Value: &sequencerblockv1.RollupData_Deposit{Deposit: &sequencerblockv1.Deposit{ + BridgeAddress: &primitivev1.Address{ + Bech32M: bridgeAddress, + }, + Asset: bridgeAssetDenom, + Amount: bigIntToProtoU128(big.NewInt(1000000000000000000)), + RollupId: &primitivev1.RollupId{Inner: make([]byte, 0)}, + DestinationChainAddress: chainDestinationAddress.String(), + SourceTransactionId: &primitivev1.TransactionId{ + Inner: "test_tx_hash", + }, + SourceActionIndex: 0, + }}} + + finalTxs := []*sequencerblockv1.RollupData{seqData1, seqData2, auctionResultSequenceData, depositTx} + + txsToProcess, err := UnbundleRollupData(finalTxs, 2, serviceV1Alpha1.BridgeAddresses(), serviceV1Alpha1.BridgeAllowedAssets(), prevRollupBlockHash) + require.NoError(t, err, "failed to unbundle rollup data: %v", err) + + require.Equal(t, txsToProcess.Len(), 6, "expected 6 txs to process") + + // auction result txs should be the first 3 + require.True(t, bytes.Equal(txsToProcess[0].Hash().Bytes(), tx1.Hash().Bytes()), "expected tx1 to be first") + require.True(t, bytes.Equal(txsToProcess[1].Hash().Bytes(), tx2.Hash().Bytes()), "expected tx2 to be second") + require.True(t, bytes.Equal(txsToProcess[2].Hash().Bytes(), tx3.Hash().Bytes()), "expected tx3 to be third") +} From c4662a75eb9f7ced398e26c0e475f25cb389bcdc Mon Sep 17 00:00:00 2001 From: Bharath Date: Wed, 13 
Nov 2024 18:44:56 +0530 Subject: [PATCH 56/79] add signature verification --- go.mod | 2 + go.sum | 4 ++ grpc/shared/validation.go | 11 +++++- grpc/shared/validation_test.go | 69 ++++++++++++++++++++++++---------- 4 files changed, 65 insertions(+), 21 deletions(-) diff --git a/go.mod b/go.mod index 946712d50..2e6fe961d 100644 --- a/go.mod +++ b/go.mod @@ -85,6 +85,7 @@ require ( ) require ( + filippo.io/edwards25519 v1.0.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect github.com/DataDog/zstd v1.4.5 // indirect @@ -120,6 +121,7 @@ require ( github.com/google/pprof v0.0.0-20230207041349-798e818bf904 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-retryablehttp v0.7.4 // indirect + github.com/hdevalence/ed25519consensus v0.2.0 // indirect github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/klauspost/compress v1.15.15 // indirect diff --git a/go.sum b/go.sum index a5c55efb7..7f7f15830 100644 --- a/go.sum +++ b/go.sum @@ -43,6 +43,8 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +filippo.io/edwards25519 v1.0.0 h1:0wAIcmJUqRdI8IJ/3eGi5/HwXZWPujYXXlkrQogz0Ek= +filippo.io/edwards25519 v1.0.0/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0 h1:8q4SaHjFsClSvuVne0ID/5Ka8u3fcIHyqkLjcFpNRHQ= github.com/Azure/azure-sdk-for-go/sdk/azcore v1.7.0/go.mod h1:bjGvMhVMb+EEm3VRNQawDMUyMMjo+S5ewNjflkep/0Q= github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.3.0 h1:vcYCAze6p19qBW7MhZybIsqD8sMV8js0NyQM8JDnVtg= @@ -343,6 +345,8 @@ github.com/hashicorp/go-retryablehttp v0.7.4 h1:ZQgVdpTdAL7WpMIwLzCfbalOcSUdkDZn github.com/hashicorp/go-retryablehttp v0.7.4/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hdevalence/ed25519consensus v0.2.0 h1:37ICyZqdyj0lAZ8P4D1d1id3HqbbG1N3iBb1Tb4rdcU= +github.com/hdevalence/ed25519consensus v0.2.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 h1:X4egAf/gcS1zATw6wn4Ej8vjuVGxeHdan+bRb2ebyv4= github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= diff --git a/grpc/shared/validation.go b/grpc/shared/validation.go index 414e9477e..ec66a1044 100644 --- a/grpc/shared/validation.go +++ b/grpc/shared/validation.go @@ -5,6 +5,7 @@ import ( primitivev1 "buf.build/gen/go/astria/primitives/protocolbuffers/go/astria/primitive/v1" sequencerblockv1 "buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go/astria/sequencerblock/v1" "bytes" + "crypto/ed25519" "crypto/sha256" "fmt" "github.com/ethereum/go-ethereum/common" @@ -120,7 +121,15 @@ func unmarshallAuctionResultTxs(auctionResult *bundlev1alpha1.AuctionResult, pre return nil, errors.New("prev block hash do 
not match in allocation") } - // TODO - validate the signature and public key + message, err := proto.Marshal(auctionResult.GetAllocation()) + if err != nil { + return nil, errors.Wrap(err, "failed to marshal allocation") + } + publicKey := ed25519.PublicKey(auctionResult.GetPublicKey()) + signature := auctionResult.GetSignature() + if !ed25519.Verify(publicKey, message, signature) { + return nil, errors.New("failed to verify signature") + } // unmarshall the transactions in the bundle for _, allocationTx := range allocation.GetTransactions() { diff --git a/grpc/shared/validation_test.go b/grpc/shared/validation_test.go index 61b25dc58..1fcdbeb83 100644 --- a/grpc/shared/validation_test.go +++ b/grpc/shared/validation_test.go @@ -4,6 +4,7 @@ import ( bundlev1alpha1 "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/bundle/v1alpha1" "bytes" "crypto/ecdsa" + "crypto/ed25519" "github.com/golang/protobuf/proto" "math/big" "testing" @@ -84,6 +85,25 @@ func TestUnmarshallAuctionResultTxs(t *testing.T) { validMarshalledTx3, err := tx3.MarshalBinary() require.NoError(t, err, "failed to marshal valid tx: %v", err) + pubkey, privkey, err := ed25519.GenerateKey(nil) + require.NoError(t, err, "failed to generate public and private key") + + validAllocation := &bundlev1alpha1.Bundle{ + Fee: 100, + Transactions: [][]byte{validMarshalledTx1, validMarshalledTx2, validMarshalledTx3}, + BaseSequencerBlockHash: []byte("sequencer block hash"), + PrevRollupBlockHash: []byte("prev rollup block hash"), + } + + marshalledAllocation, err := proto.Marshal(validAllocation) + require.NoError(t, err, "failed to marshal allocation: %v", err) + + signedAllocation, err := privkey.Sign(nil, marshalledAllocation, &ed25519.Options{ + Hash: 0, + Context: "", + }) + require.NoError(t, err, "failed to sign allocation: %v", err) + tests := []struct { description string auctionResult *bundlev1alpha1.AuctionResult @@ -110,10 +130,10 @@ func TestUnmarshallAuctionResultTxs(t *testing.T) { wantErr: "prev block hash do not match in allocation", }, { - description: "unmarshallable sequencer tx", + description: "invalid signature", auctionResult: &bundlev1alpha1.AuctionResult{ - Signature: make([]byte, 0), - PublicKey: make([]byte, 0), + Signature: []byte("invalid signature"), + PublicKey: pubkey, Allocation: &bundlev1alpha1.Bundle{ Fee: 100, Transactions: [][]byte{[]byte("unmarshallable tx")}, @@ -123,19 +143,14 @@ func TestUnmarshallAuctionResultTxs(t *testing.T) { }, prevBlockHash: []byte("prev rollup block hash"), expectedOutput: types.Transactions{}, - wantErr: "failed to unmarshall allocation transaction", + wantErr: "failed to verify signature", }, { description: "valid auction result", auctionResult: &bundlev1alpha1.AuctionResult{ - Signature: make([]byte, 0), - PublicKey: make([]byte, 0), - Allocation: &bundlev1alpha1.Bundle{ - Fee: 100, - Transactions: [][]byte{validMarshalledTx1, validMarshalledTx2, validMarshalledTx3}, - BaseSequencerBlockHash: []byte("sequencer block hash"), - PrevRollupBlockHash: []byte("prev rollup block hash"), - }, + Signature: signedAllocation, + PublicKey: pubkey, + Allocation: validAllocation, }, prevBlockHash: []byte("prev rollup block hash"), expectedOutput: types.Transactions{tx1, tx2, tx3}, @@ -361,16 +376,30 @@ func TestUnbundleRollupData(t *testing.T) { validMarshalledTx5, err := tx5.MarshalBinary() require.NoError(t, err, "failed to marshal valid tx: %v", err) + pubKey, privKey, err := ed25519.GenerateKey(nil) + require.NoError(t, err, "failed to generate ed25519 key") + + 
allocation := &bundlev1alpha1.Bundle{ + Fee: 100, + Transactions: [][]byte{validMarshalledTx1, validMarshalledTx2, validMarshalledTx3}, + BaseSequencerBlockHash: baseSequencerBlockHash, + PrevRollupBlockHash: prevRollupBlockHash, + } + + marshalledAllocation, err := proto.Marshal(allocation) + require.NoError(t, err, "failed to marshal allocation: %v", err) + signedAllocation, err := privKey.Sign(nil, marshalledAllocation, &ed25519.Options{ + Hash: 0, + Context: "", + }) + require.NoError(t, err, "failed to sign allocation: %v", err) + auctionResult := &bundlev1alpha1.AuctionResult{ - Signature: make([]byte, 0), - PublicKey: make([]byte, 0), - Allocation: &bundlev1alpha1.Bundle{ - Fee: 100, - Transactions: [][]byte{validMarshalledTx1, validMarshalledTx2, validMarshalledTx3}, - BaseSequencerBlockHash: baseSequencerBlockHash, - PrevRollupBlockHash: prevRollupBlockHash, - }, + Signature: signedAllocation, + PublicKey: pubKey, + Allocation: allocation, } + marshalledAuctionResult, err := proto.Marshal(auctionResult) require.NoError(t, err, "failed to marshal auction result: %v", err) auctionResultSequenceData := &sequencerblockv1.RollupData{ From 64758bbc6897562381dd7e042ff6f408a84cd89e Mon Sep 17 00:00:00 2001 From: Bharath Date: Sun, 17 Nov 2024 20:52:22 +0530 Subject: [PATCH 57/79] set the trusted builder public key in genesis --- genesis.json | 1 + grpc/execution/server.go | 11 ++++++----- grpc/execution/server_test.go | 16 ++++++++-------- grpc/optimistic/server.go | 11 ++++++----- grpc/optimistic/server_test.go | 8 ++++---- grpc/shared/container.go | 28 +++++++++++++++++++++++----- grpc/shared/test_setup.go | 10 +++++++--- grpc/shared/test_utils.go | 12 ++++++++++-- grpc/shared/validation.go | 30 ++++++++++++++++++++---------- grpc/shared/validation_test.go | 27 +++++++++++---------------- params/config.go | 1 + 11 files changed, 97 insertions(+), 58 deletions(-) diff --git a/genesis.json b/genesis.json index d4ed69eac..777d9c8d9 100644 --- a/genesis.json +++ b/genesis.json @@ -18,6 +18,7 @@ "astriaRollupName": "astria", "astriaOverrideGenesisExtraData": true, "astriaSequencerInitialHeight": 2, + "astriaTrustedBuilderPubKey": "", "astriaSequencerAddressPrefix": "astria", "astriaCelestiaInitialHeight": 2, "astriaCelestiaHeightVariance": 10, diff --git a/grpc/execution/server.go b/grpc/execution/server.go index 3dcf39345..5befdad20 100644 --- a/grpc/execution/server.go +++ b/grpc/execution/server.go @@ -6,6 +6,7 @@ package execution import ( "context" + "crypto/ed25519" "crypto/sha256" "fmt" "github.com/ethereum/go-ethereum/eth" @@ -170,11 +171,7 @@ func (s *ExecutionServiceServerV1) ExecuteBlock(ctx context.Context, req *astria // the height that this block will be at height := s.Bc().CurrentBlock().Number.Uint64() + 1 - txsToProcess, err := shared.UnbundleRollupData(req.Transactions, height, s.BridgeAddresses(), s.BridgeAllowedAssets(), prevHeadHash.Bytes()) - if err != nil { - log.Error("failed to unbundle rollup data", "err", err) - return nil, status.Error(codes.InvalidArgument, "Could not unbundle rollup data") - } + txsToProcess := shared.UnbundleRollupDataTransactions(req.Transactions, height, s.BridgeAddresses(), s.BridgeAllowedAssets(), prevHeadHash.Bytes(), s.TrustedBuilderPublicKey()) // This set of ordered TXs on the TxPool is has been configured to be used by // the Miner when building a payload. 
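
Note: the trusted-builder check wired in above implies a matching signing flow on the builder side. The following is only an illustrative sketch, not part of any patch in this series; the helper name signAllocation is hypothetical, and it assumes the bundlev1alpha1 protobuf types and the github.com/golang/protobuf/proto marshalling already used in these diffs.

package example

import (
	"crypto/ed25519"

	bundlev1alpha1 "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/bundle/v1alpha1"
	"github.com/golang/protobuf/proto"
)

// signAllocation sketches how a builder could produce an AuctionResult that
// unmarshallAuctionResultTxs would accept: the Allocation (Bundle) is
// serialized with proto.Marshal and signed with the builder's ed25519 key,
// whose public half must equal the AstriaTrustedBuilderPublicKey set in genesis.
// (Hypothetical helper for illustration only.)
func signAllocation(priv ed25519.PrivateKey, allocation *bundlev1alpha1.Bundle) (*bundlev1alpha1.AuctionResult, error) {
	msg, err := proto.Marshal(allocation)
	if err != nil {
		return nil, err
	}
	return &bundlev1alpha1.AuctionResult{
		Signature:  ed25519.Sign(priv, msg),
		PublicKey:  priv.Public().(ed25519.PublicKey),
		Allocation: allocation,
	}, nil
}

Because the node re-marshals the Allocation before verifying, this scheme relies on the builder and the node producing byte-identical protobuf encodings of the same Bundle.
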
@@ -434,3 +431,7 @@ func (s *ExecutionServiceServerV1) BridgeAllowedAssets() map[string]struct{} { func (s *ExecutionServiceServerV1) SyncMethodsCalled() bool { return s.sharedServiceContainer.SyncMethodsCalled() } + +func (s *ExecutionServiceServerV1) TrustedBuilderPublicKey() ed25519.PublicKey { + return s.sharedServiceContainer.TrustedBuilderPublicKey() +} diff --git a/grpc/execution/server_test.go b/grpc/execution/server_test.go index fc5128229..4b910e576 100644 --- a/grpc/execution/server_test.go +++ b/grpc/execution/server_test.go @@ -22,7 +22,7 @@ import ( ) func TestExecutionService_GetGenesisInfo(t *testing.T) { - ethservice, sharedServiceContainer := shared.SetupSharedService(t, 10) + ethservice, sharedServiceContainer, _ := shared.SetupSharedService(t, 10) serviceV1Alpha1 := SetupExecutionService(t, sharedServiceContainer) genesisInfo, err := serviceV1Alpha1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{}) @@ -37,7 +37,7 @@ func TestExecutionService_GetGenesisInfo(t *testing.T) { } func TestExecutionServiceServerV1Alpha2_GetCommitmentState(t *testing.T) { - ethservice, sharedServiceContainer := shared.SetupSharedService(t, 10) + ethservice, sharedServiceContainer, _ := shared.SetupSharedService(t, 10) serviceV1Alpha1 := SetupExecutionService(t, sharedServiceContainer) commitmentState, err := serviceV1Alpha1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) @@ -64,7 +64,7 @@ func TestExecutionServiceServerV1Alpha2_GetCommitmentState(t *testing.T) { } func TestExecutionService_GetBlock(t *testing.T) { - ethservice, sharedServiceContainer := shared.SetupSharedService(t, 10) + ethservice, sharedServiceContainer, _ := shared.SetupSharedService(t, 10) serviceV1Alpha1 := SetupExecutionService(t, sharedServiceContainer) tests := []struct { @@ -124,7 +124,7 @@ func TestExecutionService_GetBlock(t *testing.T) { } func TestExecutionServiceServerV1Alpha2_BatchGetBlocks(t *testing.T) { - ethservice, sharedServiceContainer := shared.SetupSharedService(t, 10) + ethservice, sharedServiceContainer, _ := shared.SetupSharedService(t, 10) serviceV1Alpha1 := SetupExecutionService(t, sharedServiceContainer) tests := []struct { @@ -196,7 +196,7 @@ func TestExecutionServiceServerV1Alpha2_BatchGetBlocks(t *testing.T) { } func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) { - ethservice, _ := shared.SetupSharedService(t, 10) + ethservice, _, _ := shared.SetupSharedService(t, 10) tests := []struct { description string @@ -248,7 +248,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) { for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { // reset the blockchain with each test - ethservice, sharedServiceContainer := shared.SetupSharedService(t, 10) + ethservice, sharedServiceContainer, _ := shared.SetupSharedService(t, 10) serviceV1Alpha1 := SetupExecutionService(t, sharedServiceContainer) var err error // adding this to prevent shadowing of genesisInfo in the below if branch @@ -342,7 +342,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) { } func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitment(t *testing.T) { - ethservice, sharedServiceContainer := shared.SetupSharedService(t, 10) + ethservice, sharedServiceContainer, _ := shared.SetupSharedService(t, 10) serviceV1Alpha1 := SetupExecutionService(t, sharedServiceContainer) // call genesis info @@ -479,7 +479,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitment(t *testi // Check that 
invalid transactions are not added into a block and are removed from the mempool func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitmentWithInvalidTransactions(t *testing.T) { - ethservice, sharedServiceContainer := shared.SetupSharedService(t, 10) + ethservice, sharedServiceContainer, _ := shared.SetupSharedService(t, 10) serviceV1Alpha1 := SetupExecutionService(t, sharedServiceContainer) // call genesis info diff --git a/grpc/optimistic/server.go b/grpc/optimistic/server.go index 1d58db000..c2e78ba2d 100644 --- a/grpc/optimistic/server.go +++ b/grpc/optimistic/server.go @@ -5,6 +5,7 @@ import ( optimsticPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/bundle/v1alpha1" astriaPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/execution/v1" "context" + "crypto/ed25519" "errors" "fmt" "github.com/ethereum/go-ethereum/beacon/engine" @@ -173,11 +174,7 @@ func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlock(ctx context.Context, // the height that this block will be at height := o.Bc().CurrentBlock().Number.Uint64() + 1 - txsToProcess, err := shared.UnbundleRollupData(req.Transactions, height, o.BridgeAddresses(), o.BridgeAllowedAssets(), softBlock.Hash().Bytes()) - if err != nil { - log.Error("failed to unbundle rollup data", "err", err) - return nil, status.Error(codes.InvalidArgument, "Could not unbundle rollup data") - } + txsToProcess := shared.UnbundleRollupDataTransactions(req.Transactions, height, o.BridgeAddresses(), o.BridgeAllowedAssets(), softBlock.Hash().Bytes(), o.TrustedBuilderPublicKey()) // Build a payload to add to the chain payloadAttributes := &miner.BuildPayloadArgs{ @@ -278,3 +275,7 @@ func (s *OptimisticServiceV1Alpha1) BridgeAllowedAssets() map[string]struct{} { func (s *OptimisticServiceV1Alpha1) SyncMethodsCalled() bool { return s.sharedServiceContainer.SyncMethodsCalled() } + +func (s *OptimisticServiceV1Alpha1) TrustedBuilderPublicKey() ed25519.PublicKey { + return s.sharedServiceContainer.TrustedBuilderPublicKey() +} diff --git a/grpc/optimistic/server_test.go b/grpc/optimistic/server_test.go index cbcb562fc..d048c9728 100644 --- a/grpc/optimistic/server_test.go +++ b/grpc/optimistic/server_test.go @@ -24,7 +24,7 @@ import ( ) func TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlock(t *testing.T) { - ethService, _ := shared.SetupSharedService(t, 10) + ethService, _, _ := shared.SetupSharedService(t, 10) tests := []struct { description string @@ -66,7 +66,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlock(t *testing.T) { for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { - ethservice, sharedService := shared.SetupSharedService(t, 10) + ethservice, sharedService, _ := shared.SetupSharedService(t, 10) // reset the blockchain with each test optimisticServiceV1Alpha1 := SetupOptimisticService(t, sharedService) @@ -194,7 +194,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlock(t *testing.T) { } func TestNewExecutionServiceServerV1Alpha2_StreamBundles(t *testing.T) { - ethservice, sharedService := shared.SetupSharedService(t, 10) + ethservice, sharedService, _ := shared.SetupSharedService(t, 10) optimisticServiceV1Alpha1 := SetupOptimisticService(t, sharedService) executionServiceV1 := execution.SetupExecutionService(t, sharedService) @@ -358,7 +358,7 @@ func TestNewExecutionServiceServerV1Alpha2_StreamBundles(t *testing.T) { } func TestExecutionServiceServerV1Alpha2_StreamExecuteOptimisticBlock(t *testing.T) { - ethservice, sharedService := 
shared.SetupSharedService(t, 10) + ethservice, sharedService, _ := shared.SetupSharedService(t, 10) optimisticServiceV1Alpha1 := SetupOptimisticService(t, sharedService) executionServiceV1 := execution.SetupExecutionService(t, sharedService) diff --git a/grpc/shared/container.go b/grpc/shared/container.go index 04e4568e5..f15ea1993 100644 --- a/grpc/shared/container.go +++ b/grpc/shared/container.go @@ -1,6 +1,7 @@ package shared import ( + "crypto/ed25519" "errors" "fmt" "github.com/ethereum/go-ethereum/common" @@ -24,6 +25,8 @@ type SharedServiceContainer struct { bridgeAddresses map[string]*params.AstriaBridgeAddressConfig // astria bridge addess to config for that bridge account bridgeAllowedAssets map[string]struct{} // a set of allowed asset IDs structs are left empty + trustedBuilderPublicKey ed25519.PublicKey + // TODO: bharath - we could make this an atomic pointer??? nextFeeRecipient common.Address // Fee recipient for the next block } @@ -96,12 +99,23 @@ func NewSharedServiceContainer(eth *eth.Ethereum) (*SharedServiceContainer, erro } } } + + // TODO - is it desirable to not fail if the trusted builder public key is not set? + if bc.Config().AstriaTrustedBuilderPublicKey == "" { + return nil, errors.New("trusted builder public key not set") + } + // validate if its an ed25519 public key + if len(bc.Config().AstriaTrustedBuilderPublicKey) != ed25519.PublicKeySize { + return nil, errors.New("trusted builder public key is not a valid ed25519 public key") + } + sharedServiceContainer := &SharedServiceContainer{ - eth: eth, - bc: bc, - bridgeAddresses: bridgeAddresses, - bridgeAllowedAssets: bridgeAllowedAssets, - nextFeeRecipient: nextFeeRecipient, + eth: eth, + bc: bc, + bridgeAddresses: bridgeAddresses, + bridgeAllowedAssets: bridgeAllowedAssets, + nextFeeRecipient: nextFeeRecipient, + trustedBuilderPublicKey: ed25519.PublicKey(bc.Config().AstriaTrustedBuilderPublicKey), } return sharedServiceContainer, nil @@ -159,3 +173,7 @@ func (s *SharedServiceContainer) BridgeAddresses() map[string]*params.AstriaBrid func (s *SharedServiceContainer) BridgeAllowedAssets() map[string]struct{} { return s.bridgeAllowedAssets } + +func (s *SharedServiceContainer) TrustedBuilderPublicKey() ed25519.PublicKey { + return s.trustedBuilderPublicKey +} diff --git a/grpc/shared/test_setup.go b/grpc/shared/test_setup.go index 5fb0aec21..ead5ababd 100644 --- a/grpc/shared/test_setup.go +++ b/grpc/shared/test_setup.go @@ -1,15 +1,16 @@ package shared import ( + "crypto/ed25519" "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/eth" "github.com/stretchr/testify/require" "testing" ) -func SetupSharedService(t *testing.T, noOfBlocksToGenerate int) (*eth.Ethereum, *SharedServiceContainer) { +func SetupSharedService(t *testing.T, noOfBlocksToGenerate int) (*eth.Ethereum, *SharedServiceContainer, ed25519.PrivateKey) { t.Helper() - genesis, blocks, bridgeAddress, feeCollectorKey := GenerateMergeChain(noOfBlocksToGenerate, true) + genesis, blocks, bridgeAddress, feeCollectorKey, trustedBuilderPrivkey := GenerateMergeChain(noOfBlocksToGenerate, true) ethservice := StartEthService(t, genesis) sharedService, err := NewSharedServiceContainer(ethservice) @@ -28,5 +29,8 @@ func SetupSharedService(t *testing.T, noOfBlocksToGenerate int) (*eth.Ethereum, _, err = ethservice.BlockChain().InsertChain(blocks) require.Nil(t, err, "can't insert blocks") - return ethservice, sharedService + // FIXME - this interface isn't right for the tests, we shouldn't be exposing the trusted builder priv key like this 
+ // we should instead allow the test to create it and pass it to the shared service container in the constructor + // but that can make the codebase a bit weird, so we can leave it like this for now + return ethservice, sharedService, trustedBuilderPrivkey } diff --git a/grpc/shared/test_utils.go b/grpc/shared/test_utils.go index 0c95927a7..8fd4f9c92 100644 --- a/grpc/shared/test_utils.go +++ b/grpc/shared/test_utils.go @@ -3,6 +3,7 @@ package shared import ( primitivev1 "buf.build/gen/go/astria/primitives/protocolbuffers/go/astria/primitive/v1" "crypto/ecdsa" + "crypto/ed25519" "math/big" "testing" "time" @@ -36,7 +37,7 @@ var ( testBalance = big.NewInt(2e18) ) -func GenerateMergeChain(n int, merged bool) (*core.Genesis, []*types.Block, string, *ecdsa.PrivateKey) { +func GenerateMergeChain(n int, merged bool) (*core.Genesis, []*types.Block, string, *ecdsa.PrivateKey, ed25519.PrivateKey) { config := *params.AllEthashProtocolChanges engine := consensus.Engine(beaconConsensus.New(ethash.NewFaker())) if merged { @@ -55,12 +56,19 @@ func GenerateMergeChain(n int, merged bool) (*core.Genesis, []*types.Block, stri panic(err) } + trustedBuilderPubkey, trustedBuilderPrivkey, err := ed25519.GenerateKey(nil) + if err != nil { + panic(err) + } + config.AstriaRollupName = "astria" config.AstriaSequencerAddressPrefix = "astria" config.AstriaSequencerInitialHeight = 10 config.AstriaCelestiaInitialHeight = 10 config.AstriaCelestiaHeightVariance = 10 + config.AstriaTrustedBuilderPublicKey = string(trustedBuilderPubkey) + bech32mBridgeAddress, err := bech32.EncodeM(config.AstriaSequencerAddressPrefix, bridgeAddressBytes) if err != nil { panic(err) @@ -114,7 +122,7 @@ func GenerateMergeChain(n int, merged bool) (*core.Genesis, []*types.Block, stri config.TerminalTotalDifficulty = totalDifficulty } - return genesis, blocks, bech32mBridgeAddress, feeCollectorKey + return genesis, blocks, bech32mBridgeAddress, feeCollectorKey, trustedBuilderPrivkey } // startEthService creates a full node instance for testing. diff --git a/grpc/shared/validation.go b/grpc/shared/validation.go index ec66a1044..14317c81c 100644 --- a/grpc/shared/validation.go +++ b/grpc/shared/validation.go @@ -113,7 +113,7 @@ func validateAndUnmarshallSequenceAction(tx *sequencerblockv1.RollupData) (*type return ethTx, nil } -func unmarshallAuctionResultTxs(auctionResult *bundlev1alpha1.AuctionResult, prevBlockHash []byte) (types.Transactions, error) { +func unmarshallAuctionResultTxs(auctionResult *bundlev1alpha1.AuctionResult, prevBlockHash []byte, trustedBuilderPubKey ed25519.PublicKey) (types.Transactions, error) { processedTxs := types.Transactions{} allocation := auctionResult.GetAllocation() @@ -125,7 +125,12 @@ func unmarshallAuctionResultTxs(auctionResult *bundlev1alpha1.AuctionResult, pre if err != nil { return nil, errors.Wrap(err, "failed to marshal allocation") } + publicKey := ed25519.PublicKey(auctionResult.GetPublicKey()) + if !trustedBuilderPubKey.Equal(publicKey) { + return nil, errors.New("public key in auction result does not match trusted builder public key") + } + signature := auctionResult.GetSignature() if !ed25519.Verify(publicKey, message, signature) { return nil, errors.New("failed to verify signature") @@ -145,9 +150,9 @@ func unmarshallAuctionResultTxs(auctionResult *bundlev1alpha1.AuctionResult, pre } -// `UnbundleRollupData` takes in a list of rollup data transactions and returns a list of Ethereum transactions. 
-func UnbundleRollupData(txs []*sequencerblockv1.RollupData, height uint64, bridgeAddresses map[string]*params.AstriaBridgeAddressConfig, - bridgeAllowedAssets map[string]struct{}, prevBlockHash []byte) (types.Transactions, error) { +// `UnbundleRollupDataTransactions` takes in a list of rollup data transactions and returns a list of Ethereum transactions. +func UnbundleRollupDataTransactions(txs []*sequencerblockv1.RollupData, height uint64, bridgeAddresses map[string]*params.AstriaBridgeAddressConfig, + bridgeAllowedAssets map[string]struct{}, prevBlockHash []byte, trustedBuilderPubKey ed25519.PublicKey) types.Transactions { processedTxs := types.Transactions{} auctionResultTxs := types.Transactions{} // we just return the auction result here and do not unmarshall the transactions in the bundle if we find it @@ -156,7 +161,8 @@ func UnbundleRollupData(txs []*sequencerblockv1.RollupData, height uint64, bridg if deposit := tx.GetDeposit(); deposit != nil { depositTx, err := validateAndUnmarshalDepositTx(deposit, height, bridgeAddresses, bridgeAllowedAssets) if err != nil { - return nil, errors.Wrap(err, "failed to validate and unmarshal deposit tx") + log.Error("failed to validate and unmarshal deposit tx", "error", err) + continue } processedTxs = append(processedTxs, depositTx) @@ -164,12 +170,14 @@ func UnbundleRollupData(txs []*sequencerblockv1.RollupData, height uint64, bridg sequenceData := tx.GetSequencedData() // check if sequence data is of type AuctionResult if auctionResult == nil { + // TODO - check if we can avoid a temp value tempAuctionResult := &bundlev1alpha1.AuctionResult{} err := proto.Unmarshal(sequenceData, tempAuctionResult) if err == nil { - unmarshalledAuctionResultTxs, err := unmarshallAuctionResultTxs(tempAuctionResult, prevBlockHash) + unmarshalledAuctionResultTxs, err := unmarshallAuctionResultTxs(tempAuctionResult, prevBlockHash, trustedBuilderPubKey) if err != nil { - return nil, errors.Wrap(err, "failed to unmarshall auction result transactions") + log.Error("failed to unmarshall auction result transactions", "error", err) + continue } auctionResult = tempAuctionResult @@ -177,14 +185,16 @@ func UnbundleRollupData(txs []*sequencerblockv1.RollupData, height uint64, bridg } else { ethtx, err := validateAndUnmarshallSequenceAction(tx) if err != nil { - return nil, errors.Wrap(err, "failed to unmarshall sequence action") + log.Error("failed to unmarshall sequence action", "error", err) + continue } processedTxs = append(processedTxs, ethtx) } } else { ethtx, err := validateAndUnmarshallSequenceAction(tx) if err != nil { - return nil, errors.Wrap(err, "failed to unmarshall sequence action") + log.Error("failed to unmarshall sequence action", "error", err) + continue } processedTxs = append(processedTxs, ethtx) } @@ -194,5 +204,5 @@ func UnbundleRollupData(txs []*sequencerblockv1.RollupData, height uint64, bridg // prepend auctionResultTxs to processedTxs processedTxs = append(auctionResultTxs, processedTxs...) 
- return processedTxs, nil + return processedTxs } diff --git a/grpc/shared/validation_test.go b/grpc/shared/validation_test.go index 1fcdbeb83..3cddfbf28 100644 --- a/grpc/shared/validation_test.go +++ b/grpc/shared/validation_test.go @@ -73,6 +73,8 @@ func generateBech32MAddress() string { } func TestUnmarshallAuctionResultTxs(t *testing.T) { + _, serviceV1Alpha1, trustedBuilderPrivKey := SetupSharedService(t, 10) + tx1 := transaction(0, 1000, TestKey) validMarshalledTx1, err := tx1.MarshalBinary() require.NoError(t, err, "failed to marshal valid tx: %v", err) @@ -85,9 +87,6 @@ func TestUnmarshallAuctionResultTxs(t *testing.T) { validMarshalledTx3, err := tx3.MarshalBinary() require.NoError(t, err, "failed to marshal valid tx: %v", err) - pubkey, privkey, err := ed25519.GenerateKey(nil) - require.NoError(t, err, "failed to generate public and private key") - validAllocation := &bundlev1alpha1.Bundle{ Fee: 100, Transactions: [][]byte{validMarshalledTx1, validMarshalledTx2, validMarshalledTx3}, @@ -98,7 +97,7 @@ func TestUnmarshallAuctionResultTxs(t *testing.T) { marshalledAllocation, err := proto.Marshal(validAllocation) require.NoError(t, err, "failed to marshal allocation: %v", err) - signedAllocation, err := privkey.Sign(nil, marshalledAllocation, &ed25519.Options{ + signedAllocation, err := trustedBuilderPrivKey.Sign(nil, marshalledAllocation, &ed25519.Options{ Hash: 0, Context: "", }) @@ -133,7 +132,7 @@ func TestUnmarshallAuctionResultTxs(t *testing.T) { description: "invalid signature", auctionResult: &bundlev1alpha1.AuctionResult{ Signature: []byte("invalid signature"), - PublicKey: pubkey, + PublicKey: serviceV1Alpha1.TrustedBuilderPublicKey(), Allocation: &bundlev1alpha1.Bundle{ Fee: 100, Transactions: [][]byte{[]byte("unmarshallable tx")}, @@ -149,7 +148,7 @@ func TestUnmarshallAuctionResultTxs(t *testing.T) { description: "valid auction result", auctionResult: &bundlev1alpha1.AuctionResult{ Signature: signedAllocation, - PublicKey: pubkey, + PublicKey: serviceV1Alpha1.TrustedBuilderPublicKey(), Allocation: validAllocation, }, prevBlockHash: []byte("prev rollup block hash"), @@ -160,7 +159,7 @@ func TestUnmarshallAuctionResultTxs(t *testing.T) { for _, test := range tests { t.Run(test.description, func(t *testing.T) { - finalTxs, err := unmarshallAuctionResultTxs(test.auctionResult, test.prevBlockHash) + finalTxs, err := unmarshallAuctionResultTxs(test.auctionResult, test.prevBlockHash, serviceV1Alpha1.TrustedBuilderPublicKey()) if test.wantErr == "" && err == nil { for _, tx := range test.expectedOutput { foundTx := false @@ -181,7 +180,7 @@ func TestUnmarshallAuctionResultTxs(t *testing.T) { } func TestValidateAndUnmarshallDepositTx(t *testing.T) { - ethservice, serviceV1Alpha1 := SetupSharedService(t, 10) + ethservice, serviceV1Alpha1, _ := SetupSharedService(t, 10) chainDestinationKey, err := crypto.GenerateKey() require.Nil(t, err, "failed to generate chain destination key: %v", err) @@ -353,7 +352,7 @@ func TestValidateAndUnmarshallSequenceAction(t *testing.T) { } func TestUnbundleRollupData(t *testing.T) { - ethservice, serviceV1Alpha1 := SetupSharedService(t, 10) + ethservice, serviceV1Alpha1, trustedBuilderPrivKey := SetupSharedService(t, 10) baseSequencerBlockHash := []byte("sequencer block hash") prevRollupBlockHash := []byte("prev rollup block hash") @@ -376,9 +375,6 @@ func TestUnbundleRollupData(t *testing.T) { validMarshalledTx5, err := tx5.MarshalBinary() require.NoError(t, err, "failed to marshal valid tx: %v", err) - pubKey, privKey, err := 
ed25519.GenerateKey(nil) - require.NoError(t, err, "failed to generate ed25519 key") - allocation := &bundlev1alpha1.Bundle{ Fee: 100, Transactions: [][]byte{validMarshalledTx1, validMarshalledTx2, validMarshalledTx3}, @@ -388,7 +384,7 @@ func TestUnbundleRollupData(t *testing.T) { marshalledAllocation, err := proto.Marshal(allocation) require.NoError(t, err, "failed to marshal allocation: %v", err) - signedAllocation, err := privKey.Sign(nil, marshalledAllocation, &ed25519.Options{ + signedAllocation, err := trustedBuilderPrivKey.Sign(nil, marshalledAllocation, &ed25519.Options{ Hash: 0, Context: "", }) @@ -396,7 +392,7 @@ func TestUnbundleRollupData(t *testing.T) { auctionResult := &bundlev1alpha1.AuctionResult{ Signature: signedAllocation, - PublicKey: pubKey, + PublicKey: serviceV1Alpha1.TrustedBuilderPublicKey(), Allocation: allocation, } @@ -440,8 +436,7 @@ func TestUnbundleRollupData(t *testing.T) { finalTxs := []*sequencerblockv1.RollupData{seqData1, seqData2, auctionResultSequenceData, depositTx} - txsToProcess, err := UnbundleRollupData(finalTxs, 2, serviceV1Alpha1.BridgeAddresses(), serviceV1Alpha1.BridgeAllowedAssets(), prevRollupBlockHash) - require.NoError(t, err, "failed to unbundle rollup data: %v", err) + txsToProcess := UnbundleRollupDataTransactions(finalTxs, 2, serviceV1Alpha1.BridgeAddresses(), serviceV1Alpha1.BridgeAllowedAssets(), prevRollupBlockHash, serviceV1Alpha1.TrustedBuilderPublicKey()) require.Equal(t, txsToProcess.Len(), 6, "expected 6 txs to process") diff --git a/params/config.go b/params/config.go index d9b40b881..efdbca0fe 100644 --- a/params/config.go +++ b/params/config.go @@ -388,6 +388,7 @@ type ChainConfig struct { AstriaBridgeAddressConfigs []AstriaBridgeAddressConfig `json:"astriaBridgeAddresses,omitempty"` AstriaFeeCollectors map[uint32]common.Address `json:"astriaFeeCollectors"` AstriaEIP1559Params *AstriaEIP1559Params `json:"astriaEIP1559Params,omitempty"` + AstriaTrustedBuilderPublicKey string `json:"astriaTrustedBuilderPublicKey,omitempty"` } func (c *ChainConfig) AstriaExtraData() []byte { From 6985314f06cf6c6686b586a8e949b65259163121 Mon Sep 17 00:00:00 2001 From: Bharath Date: Sun, 17 Nov 2024 21:58:14 +0530 Subject: [PATCH 58/79] add some tests for the auction results --- grpc/shared/validation_test.go | 260 +++++++++++++++++++++++++++++++++ 1 file changed, 260 insertions(+) diff --git a/grpc/shared/validation_test.go b/grpc/shared/validation_test.go index 3cddfbf28..71055ef53 100644 --- a/grpc/shared/validation_test.go +++ b/grpc/shared/validation_test.go @@ -128,6 +128,22 @@ func TestUnmarshallAuctionResultTxs(t *testing.T) { expectedOutput: types.Transactions{}, wantErr: "prev block hash do not match in allocation", }, + { + description: "public key doesn't match", + auctionResult: &bundlev1alpha1.AuctionResult{ + Signature: []byte("invalid signature"), + PublicKey: []byte("invalid public key"), + Allocation: &bundlev1alpha1.Bundle{ + Fee: 100, + Transactions: [][]byte{[]byte("unmarshallable tx")}, + BaseSequencerBlockHash: []byte("sequencer block hash"), + PrevRollupBlockHash: []byte("prev rollup block hash"), + }, + }, + prevBlockHash: []byte("prev rollup block hash"), + expectedOutput: types.Transactions{}, + wantErr: "public key in auction result does not match trusted builder public key", + }, { description: "invalid signature", auctionResult: &bundlev1alpha1.AuctionResult{ @@ -444,4 +460,248 @@ func TestUnbundleRollupData(t *testing.T) { require.True(t, bytes.Equal(txsToProcess[0].Hash().Bytes(), tx1.Hash().Bytes()), "expected 
tx1 to be first") require.True(t, bytes.Equal(txsToProcess[1].Hash().Bytes(), tx2.Hash().Bytes()), "expected tx2 to be second") require.True(t, bytes.Equal(txsToProcess[2].Hash().Bytes(), tx3.Hash().Bytes()), "expected tx3 to be third") + require.True(t, bytes.Equal(txsToProcess[3].Hash().Bytes(), tx4.Hash().Bytes()), "expected tx4 to be fourth") + require.True(t, bytes.Equal(txsToProcess[4].Hash().Bytes(), tx5.Hash().Bytes()), "expected tx5 to be fifth") +} + +func TestUnbundleRollupDataWithDuplicateAuctionResults(t *testing.T) { + ethservice, serviceV1Alpha1, trustedBuilderPrivKey := SetupSharedService(t, 10) + + baseSequencerBlockHash := []byte("sequencer block hash") + prevRollupBlockHash := []byte("prev rollup block hash") + + // txs in + tx1 := transaction(0, 1000, TestKey) + tx2 := transaction(1, 1000, TestKey) + tx3 := transaction(2, 1000, TestKey) + tx4 := transaction(3, 1000, TestKey) + tx5 := transaction(4, 1000, TestKey) + + validMarshalledTx1, err := tx1.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + validMarshalledTx2, err := tx2.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + validMarshalledTx3, err := tx3.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + validMarshalledTx4, err := tx4.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + validMarshalledTx5, err := tx5.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + + allocation := &bundlev1alpha1.Bundle{ + Fee: 100, + Transactions: [][]byte{validMarshalledTx1, validMarshalledTx2, validMarshalledTx3}, + BaseSequencerBlockHash: baseSequencerBlockHash, + PrevRollupBlockHash: prevRollupBlockHash, + } + + marshalledAllocation, err := proto.Marshal(allocation) + require.NoError(t, err, "failed to marshal allocation: %v", err) + signedAllocation, err := trustedBuilderPrivKey.Sign(nil, marshalledAllocation, &ed25519.Options{ + Hash: 0, + Context: "", + }) + require.NoError(t, err, "failed to sign allocation: %v", err) + + auctionResult := &bundlev1alpha1.AuctionResult{ + Signature: signedAllocation, + PublicKey: serviceV1Alpha1.TrustedBuilderPublicKey(), + Allocation: allocation, + } + + marshalledAuctionResult, err := proto.Marshal(auctionResult) + require.NoError(t, err, "failed to marshal auction result: %v", err) + auctionResultSequenceData := &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{ + SequencedData: marshalledAuctionResult, + }, + } + // this auction result should be ignored + auctionResultSequenceData2 := &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{ + SequencedData: marshalledAuctionResult, + }, + } + seqData1 := &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{ + SequencedData: validMarshalledTx4, + }, + } + seqData2 := &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{ + SequencedData: validMarshalledTx5, + }, + } + + bridgeAddress := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].BridgeAddress + bridgeAssetDenom := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].AssetDenom + chainDestinationKey, err := crypto.GenerateKey() + require.Nil(t, err, "failed to generate chain destination key: %v", err) + chainDestinationAddress := crypto.PubkeyToAddress(chainDestinationKey.PublicKey) + + depositTx := &sequencerblockv1.RollupData{Value: &sequencerblockv1.RollupData_Deposit{Deposit: 
&sequencerblockv1.Deposit{ + BridgeAddress: &primitivev1.Address{ + Bech32M: bridgeAddress, + }, + Asset: bridgeAssetDenom, + Amount: bigIntToProtoU128(big.NewInt(1000000000000000000)), + RollupId: &primitivev1.RollupId{Inner: make([]byte, 0)}, + DestinationChainAddress: chainDestinationAddress.String(), + SourceTransactionId: &primitivev1.TransactionId{ + Inner: "test_tx_hash", + }, + SourceActionIndex: 0, + }}} + + finalTxs := []*sequencerblockv1.RollupData{seqData1, seqData2, auctionResultSequenceData, auctionResultSequenceData2, depositTx} + + txsToProcess := UnbundleRollupDataTransactions(finalTxs, 2, serviceV1Alpha1.BridgeAddresses(), serviceV1Alpha1.BridgeAllowedAssets(), prevRollupBlockHash, serviceV1Alpha1.TrustedBuilderPublicKey()) + + require.Equal(t, txsToProcess.Len(), 6, "expected 6 txs to process") + + // auction result txs should be the first 3 + require.True(t, bytes.Equal(txsToProcess[0].Hash().Bytes(), tx1.Hash().Bytes()), "expected tx1 to be first") + require.True(t, bytes.Equal(txsToProcess[1].Hash().Bytes(), tx2.Hash().Bytes()), "expected tx2 to be second") + require.True(t, bytes.Equal(txsToProcess[2].Hash().Bytes(), tx3.Hash().Bytes()), "expected tx3 to be third") + require.True(t, bytes.Equal(txsToProcess[3].Hash().Bytes(), tx4.Hash().Bytes()), "expected tx4 to be fourth") + require.True(t, bytes.Equal(txsToProcess[4].Hash().Bytes(), tx5.Hash().Bytes()), "expected tx5 to be fifth") +} + +func TestUnbundleRollupDataWithDuplicateInvalidAuctionResults(t *testing.T) { + ethservice, serviceV1Alpha1, trustedBuilderPrivKey := SetupSharedService(t, 10) + + baseSequencerBlockHash := []byte("sequencer block hash") + prevRollupBlockHash := []byte("prev rollup block hash") + + _, invalidTrustedBuilderprivkey, err := ed25519.GenerateKey(nil) + require.Nil(t, err, "failed to generate invalid trusted builder key: %v", err) + + // txs in + tx1 := transaction(0, 1000, TestKey) + tx2 := transaction(1, 1000, TestKey) + tx3 := transaction(2, 1000, TestKey) + tx4 := transaction(3, 1000, TestKey) + tx5 := transaction(4, 1000, TestKey) + + validMarshalledTx1, err := tx1.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + validMarshalledTx2, err := tx2.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + validMarshalledTx3, err := tx3.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + validMarshalledTx4, err := tx4.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + validMarshalledTx5, err := tx5.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + + // transactions that the attacker is trying to get into the top of block + invalidTx1 := transaction(5, 1000, TestKey) + invalidMarshalledTx1, err := invalidTx1.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + invalidTx2 := transaction(6, 1000, TestKey) + invalidMarshalledTx2, err := invalidTx2.MarshalBinary() + require.NoError(t, err, "failed to marshal valid tx: %v", err) + + allocation := &bundlev1alpha1.Bundle{ + Fee: 100, + Transactions: [][]byte{validMarshalledTx1, validMarshalledTx2, validMarshalledTx3}, + BaseSequencerBlockHash: baseSequencerBlockHash, + PrevRollupBlockHash: prevRollupBlockHash, + } + + marshalledAllocation, err := proto.Marshal(allocation) + require.NoError(t, err, "failed to marshal allocation: %v", err) + signedAllocation, err := trustedBuilderPrivKey.Sign(nil, marshalledAllocation, &ed25519.Options{ + Hash: 0, + Context: "", + }) 
+ require.NoError(t, err, "failed to sign allocation: %v", err) + + invalidAllocation := &bundlev1alpha1.Bundle{ + Fee: 100, + Transactions: [][]byte{invalidMarshalledTx1, invalidMarshalledTx2}, + BaseSequencerBlockHash: baseSequencerBlockHash, + PrevRollupBlockHash: prevRollupBlockHash, + } + marshalledInvalidAllocation, err := proto.Marshal(invalidAllocation) + require.NoError(t, err, "failed to marshal invalid allocation: %v", err) + + signedInvalidAllocation, err := invalidTrustedBuilderprivkey.Sign(nil, marshalledInvalidAllocation, &ed25519.Options{ + Hash: 0, + Context: "", + }) + require.NoError(t, err, "failed to sign allocation: %v", err) + + auctionResult := &bundlev1alpha1.AuctionResult{ + Signature: signedAllocation, + PublicKey: serviceV1Alpha1.TrustedBuilderPublicKey(), + Allocation: allocation, + } + + marshalledAuctionResult, err := proto.Marshal(auctionResult) + require.NoError(t, err, "failed to marshal auction result: %v", err) + + invalidAuctionResult := &bundlev1alpha1.AuctionResult{ + Signature: signedInvalidAllocation, + // trying to spoof the actual trusted builder key + PublicKey: serviceV1Alpha1.TrustedBuilderPublicKey(), + Allocation: invalidAllocation, + } + marshalledInvalidAuctionResult, err := proto.Marshal(invalidAuctionResult) + require.NoError(t, err, "failed to marshal invalid auction result: %v", err) + + auctionResultSequenceData := &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{ + SequencedData: marshalledAuctionResult, + }, + } + // this auction result should be ignored + invalidAuctionResultSequenceData := &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{ + SequencedData: marshalledInvalidAuctionResult, + }, + } + seqData1 := &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{ + SequencedData: validMarshalledTx4, + }, + } + seqData2 := &sequencerblockv1.RollupData{ + Value: &sequencerblockv1.RollupData_SequencedData{ + SequencedData: validMarshalledTx5, + }, + } + + bridgeAddress := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].BridgeAddress + bridgeAssetDenom := ethservice.BlockChain().Config().AstriaBridgeAddressConfigs[0].AssetDenom + chainDestinationKey, err := crypto.GenerateKey() + require.Nil(t, err, "failed to generate chain destination key: %v", err) + chainDestinationAddress := crypto.PubkeyToAddress(chainDestinationKey.PublicKey) + + depositTx := &sequencerblockv1.RollupData{Value: &sequencerblockv1.RollupData_Deposit{Deposit: &sequencerblockv1.Deposit{ + BridgeAddress: &primitivev1.Address{ + Bech32M: bridgeAddress, + }, + Asset: bridgeAssetDenom, + Amount: bigIntToProtoU128(big.NewInt(1000000000000000000)), + RollupId: &primitivev1.RollupId{Inner: make([]byte, 0)}, + DestinationChainAddress: chainDestinationAddress.String(), + SourceTransactionId: &primitivev1.TransactionId{ + Inner: "test_tx_hash", + }, + SourceActionIndex: 0, + }}} + + finalTxs := []*sequencerblockv1.RollupData{seqData1, seqData2, auctionResultSequenceData, invalidAuctionResultSequenceData, depositTx} + + txsToProcess := UnbundleRollupDataTransactions(finalTxs, 2, serviceV1Alpha1.BridgeAddresses(), serviceV1Alpha1.BridgeAllowedAssets(), prevRollupBlockHash, serviceV1Alpha1.TrustedBuilderPublicKey()) + + require.Equal(t, txsToProcess.Len(), 6, "expected 6 txs to process") + + // auction result txs should be the first 3 + require.True(t, bytes.Equal(txsToProcess[0].Hash().Bytes(), tx1.Hash().Bytes()), "expected tx1 to be first") + require.True(t, 
bytes.Equal(txsToProcess[1].Hash().Bytes(), tx2.Hash().Bytes()), "expected tx2 to be second") + require.True(t, bytes.Equal(txsToProcess[2].Hash().Bytes(), tx3.Hash().Bytes()), "expected tx3 to be third") + require.True(t, bytes.Equal(txsToProcess[3].Hash().Bytes(), tx4.Hash().Bytes()), "expected tx4 to be fourth") + require.True(t, bytes.Equal(txsToProcess[4].Hash().Bytes(), tx5.Hash().Bytes()), "expected tx5 to be fifth") } From 3661b01f030419c2867f3e0bbcc67ee89d081702 Mon Sep 17 00:00:00 2001 From: Bharath Date: Tue, 19 Nov 2024 20:16:47 +0530 Subject: [PATCH 59/79] dilineate trusted builder public keys by block number --- genesis.json | 4 +++- grpc/optimistic/server.go | 12 ++++++++++ grpc/shared/container.go | 46 +++++++++++++++++++++++++-------------- grpc/shared/test_utils.go | 3 ++- params/config.go | 2 +- 5 files changed, 48 insertions(+), 19 deletions(-) diff --git a/genesis.json b/genesis.json index 777d9c8d9..62123f5ad 100644 --- a/genesis.json +++ b/genesis.json @@ -18,7 +18,6 @@ "astriaRollupName": "astria", "astriaOverrideGenesisExtraData": true, "astriaSequencerInitialHeight": 2, - "astriaTrustedBuilderPubKey": "", "astriaSequencerAddressPrefix": "astria", "astriaCelestiaInitialHeight": 2, "astriaCelestiaHeightVariance": 10, @@ -41,6 +40,9 @@ } } ], + "astriaTrustedBuilderPublicKeys": { + "1": "" + }, "astriaFeeCollectors": { "1": "0xaC21B97d35Bf75A7dAb16f35b111a50e78A72F30" }, diff --git a/grpc/optimistic/server.go b/grpc/optimistic/server.go index c2e78ba2d..92ebc2d63 100644 --- a/grpc/optimistic/server.go +++ b/grpc/optimistic/server.go @@ -218,6 +218,14 @@ func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlock(ctx context.Context, }, } + if publicKey, ok := o.Bc().Config().AstriaTrustedBuilderPublicKeys[res.Number+1]; ok { + if len(publicKey) != ed25519.PublicKeySize { + log.Error("trusted builder public key is not a valid ed25519 public", "block", res.Number+1, "publicKey", publicKey) + } + + o.SetTrustedBuilderPublicKey(ed25519.PublicKey(publicKey)) + } + log.Info("ExecuteOptimisticBlock completed", "block_num", res.Number, "timestamp", res.Timestamp) executeOptimisticBlockSuccessCount.Inc(1) @@ -279,3 +287,7 @@ func (s *OptimisticServiceV1Alpha1) SyncMethodsCalled() bool { func (s *OptimisticServiceV1Alpha1) TrustedBuilderPublicKey() ed25519.PublicKey { return s.sharedServiceContainer.TrustedBuilderPublicKey() } + +func (s *OptimisticServiceV1Alpha1) SetTrustedBuilderPublicKey(trustedBuilderPublicKey ed25519.PublicKey) { + s.sharedServiceContainer.SetTrustedBuilderPublicKey(trustedBuilderPublicKey) +} diff --git a/grpc/shared/container.go b/grpc/shared/container.go index f15ea1993..320eeeda6 100644 --- a/grpc/shared/container.go +++ b/grpc/shared/container.go @@ -10,6 +10,7 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "sync" + "sync/atomic" ) type SharedServiceContainer struct { @@ -25,7 +26,7 @@ type SharedServiceContainer struct { bridgeAddresses map[string]*params.AstriaBridgeAddressConfig // astria bridge addess to config for that bridge account bridgeAllowedAssets map[string]struct{} // a set of allowed asset IDs structs are left empty - trustedBuilderPublicKey ed25519.PublicKey + trustedBuilderPublicKey atomic.Pointer[ed25519.PublicKey] // TODO: bharath - we could make this an atomic pointer??? 
nextFeeRecipient common.Address // Fee recipient for the next block @@ -87,11 +88,11 @@ func NewSharedServiceContainer(eth *eth.Ethereum) (*SharedServiceContainer, erro // To decrease compute cost, we identify the next fee recipient at the start // and update it as we execute blocks. nextFeeRecipient := common.Address{} + nextBlock := uint32(bc.CurrentBlock().Number.Int64()) + 1 if bc.Config().AstriaFeeCollectors == nil { log.Warn("fee asset collectors not set, assets will be burned") } else { maxHeightCollectorMatch := uint32(0) - nextBlock := uint32(bc.CurrentBlock().Number.Int64()) + 1 for height, collector := range bc.Config().AstriaFeeCollectors { if height <= nextBlock && height > maxHeightCollectorMatch { maxHeightCollectorMatch = height @@ -100,24 +101,33 @@ func NewSharedServiceContainer(eth *eth.Ethereum) (*SharedServiceContainer, erro } } - // TODO - is it desirable to not fail if the trusted builder public key is not set? - if bc.Config().AstriaTrustedBuilderPublicKey == "" { - return nil, errors.New("trusted builder public key not set") - } - // validate if its an ed25519 public key - if len(bc.Config().AstriaTrustedBuilderPublicKey) != ed25519.PublicKeySize { - return nil, errors.New("trusted builder public key is not a valid ed25519 public key") + trustedBuilderBlockMap := bc.Config().AstriaTrustedBuilderPublicKeys + trustedBuilderPublicKey := ed25519.PublicKey{} + if trustedBuilderBlockMap == nil { + return nil, errors.New("trusted builder public keys not set") + } else { + maxHeightCollectorMatch := uint32(0) + for height, publicKey := range trustedBuilderBlockMap { + if height <= nextBlock && height > maxHeightCollectorMatch { + maxHeightCollectorMatch = height + if len(publicKey) != ed25519.PublicKeySize { + return nil, errors.New("trusted builder public key is not a valid ed25519 public key") + } + trustedBuilderPublicKey = ed25519.PublicKey(publicKey) + } + } } sharedServiceContainer := &SharedServiceContainer{ - eth: eth, - bc: bc, - bridgeAddresses: bridgeAddresses, - bridgeAllowedAssets: bridgeAllowedAssets, - nextFeeRecipient: nextFeeRecipient, - trustedBuilderPublicKey: ed25519.PublicKey(bc.Config().AstriaTrustedBuilderPublicKey), + eth: eth, + bc: bc, + bridgeAddresses: bridgeAddresses, + bridgeAllowedAssets: bridgeAllowedAssets, + nextFeeRecipient: nextFeeRecipient, } + sharedServiceContainer.SetTrustedBuilderPublicKey(trustedBuilderPublicKey) + return sharedServiceContainer, nil } @@ -175,5 +185,9 @@ func (s *SharedServiceContainer) BridgeAllowedAssets() map[string]struct{} { } func (s *SharedServiceContainer) TrustedBuilderPublicKey() ed25519.PublicKey { - return s.trustedBuilderPublicKey + return *s.trustedBuilderPublicKey.Load() +} + +func (s *SharedServiceContainer) SetTrustedBuilderPublicKey(newPublicKey ed25519.PublicKey) { + s.trustedBuilderPublicKey.Store(&newPublicKey) } diff --git a/grpc/shared/test_utils.go b/grpc/shared/test_utils.go index 8fd4f9c92..bcf87697c 100644 --- a/grpc/shared/test_utils.go +++ b/grpc/shared/test_utils.go @@ -67,7 +67,8 @@ func GenerateMergeChain(n int, merged bool) (*core.Genesis, []*types.Block, stri config.AstriaCelestiaInitialHeight = 10 config.AstriaCelestiaHeightVariance = 10 - config.AstriaTrustedBuilderPublicKey = string(trustedBuilderPubkey) + config.AstriaTrustedBuilderPublicKeys = make(map[uint32]string) + config.AstriaTrustedBuilderPublicKeys[1] = string(trustedBuilderPubkey) bech32mBridgeAddress, err := bech32.EncodeM(config.AstriaSequencerAddressPrefix, bridgeAddressBytes) if err != nil { diff --git 
a/params/config.go b/params/config.go index efdbca0fe..a28d1acfd 100644 --- a/params/config.go +++ b/params/config.go @@ -388,7 +388,7 @@ type ChainConfig struct { AstriaBridgeAddressConfigs []AstriaBridgeAddressConfig `json:"astriaBridgeAddresses,omitempty"` AstriaFeeCollectors map[uint32]common.Address `json:"astriaFeeCollectors"` AstriaEIP1559Params *AstriaEIP1559Params `json:"astriaEIP1559Params,omitempty"` - AstriaTrustedBuilderPublicKey string `json:"astriaTrustedBuilderPublicKey,omitempty"` + AstriaTrustedBuilderPublicKeys map[uint32]string `json:"astriaTrustedBuilderPublicKeys,omitempty"` } func (c *ChainConfig) AstriaExtraData() []byte { From 9e0e612ef2b293f11fa1c7b7b17a06c01ccf706f Mon Sep 17 00:00:00 2001 From: Bharath Date: Wed, 20 Nov 2024 13:09:11 +0530 Subject: [PATCH 60/79] renaming --- grpc/execution/server_test.go | 64 +++++++++++++++++----------------- grpc/optimistic/server_test.go | 6 ++-- 2 files changed, 35 insertions(+), 35 deletions(-) diff --git a/grpc/execution/server_test.go b/grpc/execution/server_test.go index 4b910e576..08a653339 100644 --- a/grpc/execution/server_test.go +++ b/grpc/execution/server_test.go @@ -21,11 +21,11 @@ import ( "testing" ) -func TestExecutionService_GetGenesisInfo(t *testing.T) { +func TestExecutionServiceV1_GetGenesisInfo(t *testing.T) { ethservice, sharedServiceContainer, _ := shared.SetupSharedService(t, 10) - serviceV1Alpha1 := SetupExecutionService(t, sharedServiceContainer) + serviceV1 := SetupExecutionService(t, sharedServiceContainer) - genesisInfo, err := serviceV1Alpha1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{}) + genesisInfo, err := serviceV1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{}) require.Nil(t, err, "GetGenesisInfo failed") hashedRollupId := sha256.Sum256([]byte(ethservice.BlockChain().Config().AstriaRollupName)) @@ -33,14 +33,14 @@ func TestExecutionService_GetGenesisInfo(t *testing.T) { require.True(t, bytes.Equal(genesisInfo.RollupId.Inner, hashedRollupId[:]), "RollupId is not correct") require.Equal(t, genesisInfo.GetSequencerGenesisBlockHeight(), ethservice.BlockChain().Config().AstriaSequencerInitialHeight, "SequencerInitialHeight is not correct") require.Equal(t, genesisInfo.GetCelestiaBlockVariance(), ethservice.BlockChain().Config().AstriaCelestiaHeightVariance, "CelestiaHeightVariance is not correct") - require.True(t, serviceV1Alpha1.sharedServiceContainer.GenesisInfoCalled(), "GetGenesisInfo should be called") + require.True(t, serviceV1.sharedServiceContainer.GenesisInfoCalled(), "GetGenesisInfo should be called") } -func TestExecutionServiceServerV1Alpha2_GetCommitmentState(t *testing.T) { +func TestExecutionServiceServerV1_GetCommitmentState(t *testing.T) { ethservice, sharedServiceContainer, _ := shared.SetupSharedService(t, 10) - serviceV1Alpha1 := SetupExecutionService(t, sharedServiceContainer) + serviceV1 := SetupExecutionService(t, sharedServiceContainer) - commitmentState, err := serviceV1Alpha1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) + commitmentState, err := serviceV1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) require.Nil(t, err, "GetCommitmentState failed") require.NotNil(t, commitmentState, "CommitmentState is nil") @@ -60,12 +60,12 @@ func TestExecutionServiceServerV1Alpha2_GetCommitmentState(t *testing.T) { require.Equal(t, uint64(commitmentState.Firm.Number), firmBlock.Number.Uint64(), "Firm Block Number do not match") require.Equal(t, 
commitmentState.BaseCelestiaHeight, ethservice.BlockChain().Config().AstriaCelestiaInitialHeight, "BaseCelestiaHeight is not correct") - require.True(t, serviceV1Alpha1.sharedServiceContainer.CommitmentStateCalled(), "GetCommitmentState should be called") + require.True(t, serviceV1.sharedServiceContainer.CommitmentStateCalled(), "GetCommitmentState should be called") } -func TestExecutionService_GetBlock(t *testing.T) { +func TestExecutionServiceV1_GetBlock(t *testing.T) { ethservice, sharedServiceContainer, _ := shared.SetupSharedService(t, 10) - serviceV1Alpha1 := SetupExecutionService(t, sharedServiceContainer) + serviceV1 := SetupExecutionService(t, sharedServiceContainer) tests := []struct { description string @@ -97,7 +97,7 @@ func TestExecutionService_GetBlock(t *testing.T) { for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { - blockInfo, err := serviceV1Alpha1.GetBlock(context.Background(), tt.getBlockRequst) + blockInfo, err := serviceV1.GetBlock(context.Background(), tt.getBlockRequst) if tt.expectedReturnCode > 0 { require.NotNil(t, err, "GetBlock should return an error") require.Equal(t, tt.expectedReturnCode, status.Code(err), "GetBlock failed") @@ -123,9 +123,9 @@ func TestExecutionService_GetBlock(t *testing.T) { } } -func TestExecutionServiceServerV1Alpha2_BatchGetBlocks(t *testing.T) { +func TestExecutionServiceServerV1_BatchGetBlocks(t *testing.T) { ethservice, sharedServiceContainer, _ := shared.SetupSharedService(t, 10) - serviceV1Alpha1 := SetupExecutionService(t, sharedServiceContainer) + serviceV1 := SetupExecutionService(t, sharedServiceContainer) tests := []struct { description string @@ -175,7 +175,7 @@ func TestExecutionServiceServerV1Alpha2_BatchGetBlocks(t *testing.T) { for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { - batchBlocksRes, err := serviceV1Alpha1.BatchGetBlocks(context.Background(), tt.batchGetBlockRequest) + batchBlocksRes, err := serviceV1.BatchGetBlocks(context.Background(), tt.batchGetBlockRequest) if tt.expectedReturnCode > 0 { require.NotNil(t, err, "BatchGetBlocks should return an error") require.Equal(t, tt.expectedReturnCode, status.Code(err), "BatchGetBlocks failed") @@ -195,7 +195,7 @@ func TestExecutionServiceServerV1Alpha2_BatchGetBlocks(t *testing.T) { } } -func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) { +func TestExecutionServiceServerV1_ExecuteBlock(t *testing.T) { ethservice, _, _ := shared.SetupSharedService(t, 10) tests := []struct { @@ -249,18 +249,18 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) { t.Run(tt.description, func(t *testing.T) { // reset the blockchain with each test ethservice, sharedServiceContainer, _ := shared.SetupSharedService(t, 10) - serviceV1Alpha1 := SetupExecutionService(t, sharedServiceContainer) + serviceV1 := SetupExecutionService(t, sharedServiceContainer) var err error // adding this to prevent shadowing of genesisInfo in the below if branch var genesisInfo *astriaPb.GenesisInfo var commitmentStateBeforeExecuteBlock *astriaPb.CommitmentState if tt.callGenesisInfoAndGetCommitmentState { // call getGenesisInfo and getCommitmentState before calling executeBlock - genesisInfo, err = serviceV1Alpha1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{}) + genesisInfo, err = serviceV1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{}) require.Nil(t, err, "GetGenesisInfo failed") require.NotNil(t, genesisInfo, "GenesisInfo is nil") - commitmentStateBeforeExecuteBlock, err = 
serviceV1Alpha1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) + commitmentStateBeforeExecuteBlock, err = serviceV1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) require.Nil(t, err, "GetCommitmentState failed") require.NotNil(t, commitmentStateBeforeExecuteBlock, "CommitmentState is nil") } @@ -319,7 +319,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) { Transactions: marshalledTxs, } - executeBlockRes, err := serviceV1Alpha1.ExecuteBlock(context.Background(), executeBlockReq) + executeBlockRes, err := serviceV1.ExecuteBlock(context.Background(), executeBlockReq) if tt.expectedReturnCode > 0 { require.NotNil(t, err, "ExecuteBlock should return an error") require.Equal(t, tt.expectedReturnCode, status.Code(err), "ExecuteBlock failed") @@ -331,7 +331,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) { require.Equal(t, 0, astriaOrdered.Len(), "AstriaOrdered should be empty") // check if commitment state is not updated - commitmentStateAfterExecuteBlock, err := serviceV1Alpha1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) + commitmentStateAfterExecuteBlock, err := serviceV1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) require.Nil(t, err, "GetCommitmentState failed") require.Exactly(t, commitmentStateBeforeExecuteBlock, commitmentStateAfterExecuteBlock, "Commitment state should not be updated") @@ -341,17 +341,17 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlock(t *testing.T) { } } -func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitment(t *testing.T) { +func TestExecutionServiceServerV1_ExecuteBlockAndUpdateCommitment(t *testing.T) { ethservice, sharedServiceContainer, _ := shared.SetupSharedService(t, 10) - serviceV1Alpha1 := SetupExecutionService(t, sharedServiceContainer) + serviceV1 := SetupExecutionService(t, sharedServiceContainer) // call genesis info - genesisInfo, err := serviceV1Alpha1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{}) + genesisInfo, err := serviceV1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{}) require.Nil(t, err, "GetGenesisInfo failed") require.NotNil(t, genesisInfo, "GenesisInfo is nil") // call get commitment state - commitmentState, err := serviceV1Alpha1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) + commitmentState, err := serviceV1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) require.Nil(t, err, "GetCommitmentState failed") require.NotNil(t, commitmentState, "CommitmentState is nil") @@ -416,7 +416,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitment(t *testi Transactions: marshalledTxs, } - executeBlockRes, err := serviceV1Alpha1.ExecuteBlock(context.Background(), executeBlockReq) + executeBlockRes, err := serviceV1.ExecuteBlock(context.Background(), executeBlockReq) require.Nil(t, err, "ExecuteBlock failed") require.NotNil(t, executeBlockRes, "ExecuteBlock response is nil") @@ -444,7 +444,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitment(t *testi }, } - updateCommitmentStateRes, err := serviceV1Alpha1.UpdateCommitmentState(context.Background(), updateCommitmentStateReq) + updateCommitmentStateRes, err := serviceV1.UpdateCommitmentState(context.Background(), updateCommitmentStateReq) require.Nil(t, err, "UpdateCommitmentState failed") require.NotNil(t, updateCommitmentStateRes, 
"UpdateCommitmentState response should not be nil") require.Equal(t, updateCommitmentStateRes, updateCommitmentStateReq.CommitmentState, "CommitmentState response should match request") @@ -478,17 +478,17 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitment(t *testi } // Check that invalid transactions are not added into a block and are removed from the mempool -func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitmentWithInvalidTransactions(t *testing.T) { +func TestExecutionServiceServerV1_ExecuteBlockAndUpdateCommitmentWithInvalidTransactions(t *testing.T) { ethservice, sharedServiceContainer, _ := shared.SetupSharedService(t, 10) - serviceV1Alpha1 := SetupExecutionService(t, sharedServiceContainer) + serviceV1 := SetupExecutionService(t, sharedServiceContainer) // call genesis info - genesisInfo, err := serviceV1Alpha1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{}) + genesisInfo, err := serviceV1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{}) require.Nil(t, err, "GetGenesisInfo failed") require.NotNil(t, genesisInfo, "GenesisInfo is nil") // call get commitment state - commitmentState, err := serviceV1Alpha1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) + commitmentState, err := serviceV1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) require.Nil(t, err, "GetCommitmentState failed") require.NotNil(t, commitmentState, "CommitmentState is nil") @@ -541,7 +541,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitmentWithInval Transactions: marshalledTxs, } - executeBlockRes, err := serviceV1Alpha1.ExecuteBlock(context.Background(), executeBlockReq) + executeBlockRes, err := serviceV1.ExecuteBlock(context.Background(), executeBlockReq) require.Nil(t, err, "ExecuteBlock failed") require.NotNil(t, executeBlockRes, "ExecuteBlock response is nil") @@ -569,7 +569,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteBlockAndUpdateCommitmentWithInval }, } - updateCommitmentStateRes, err := serviceV1Alpha1.UpdateCommitmentState(context.Background(), updateCommitmentStateReq) + updateCommitmentStateRes, err := serviceV1.UpdateCommitmentState(context.Background(), updateCommitmentStateReq) require.Nil(t, err, "UpdateCommitmentState failed") require.NotNil(t, updateCommitmentStateRes, "UpdateCommitmentState response should not be nil") require.Equal(t, updateCommitmentStateRes, updateCommitmentStateReq.CommitmentState, "CommitmentState response should match request") diff --git a/grpc/optimistic/server_test.go b/grpc/optimistic/server_test.go index d048c9728..4b8141b71 100644 --- a/grpc/optimistic/server_test.go +++ b/grpc/optimistic/server_test.go @@ -23,7 +23,7 @@ import ( "time" ) -func TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlock(t *testing.T) { +func TestOptimisticServiceServerV1Alpha1_ExecuteOptimisticBlock(t *testing.T) { ethService, _, _ := shared.SetupSharedService(t, 10) tests := []struct { @@ -193,7 +193,7 @@ func TestExecutionServiceServerV1Alpha2_ExecuteOptimisticBlock(t *testing.T) { } } -func TestNewExecutionServiceServerV1Alpha2_StreamBundles(t *testing.T) { +func TestNewOptimisticServiceServerV1Alpha_StreamBundles(t *testing.T) { ethservice, sharedService, _ := shared.SetupSharedService(t, 10) optimisticServiceV1Alpha1 := SetupOptimisticService(t, sharedService) @@ -357,7 +357,7 @@ func TestNewExecutionServiceServerV1Alpha2_StreamBundles(t *testing.T) { } } -func 
TestExecutionServiceServerV1Alpha2_StreamExecuteOptimisticBlock(t *testing.T) { +func TestOptimisticServiceServerV1_StreamExecuteOptimisticBlock(t *testing.T) { ethservice, sharedService, _ := shared.SetupSharedService(t, 10) optimisticServiceV1Alpha1 := SetupOptimisticService(t, sharedService) From f4bc72510fbf4c52c655400d403db8df89a3b987 Mon Sep 17 00:00:00 2001 From: Bharath Date: Wed, 20 Nov 2024 19:37:05 +0530 Subject: [PATCH 61/79] update protos --- go.mod | 14 +-- go.sum | 18 ++++ grpc/execution/server.go | 19 +++- grpc/execution/server_test.go | 16 +-- grpc/optimistic/server.go | 22 +--- grpc/optimistic/server_test.go | 8 +- grpc/shared/bech32m.go | 92 +++++++++++++++++ grpc/shared/container.go | 24 ++--- grpc/shared/test_setup.go | 6 +- grpc/shared/test_utils.go | 22 ++-- grpc/shared/validation.go | 53 +++++----- grpc/shared/validation_test.go | 179 +++++++++++++++++---------------- params/config.go | 2 +- 13 files changed, 300 insertions(+), 175 deletions(-) create mode 100644 grpc/shared/bech32m.go diff --git a/go.mod b/go.mod index 2e6fe961d..44aeb9066 100644 --- a/go.mod +++ b/go.mod @@ -3,10 +3,10 @@ module github.com/ethereum/go-ethereum go 1.21 require ( - buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-cc31a327d543.1 - buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.35.1-00000000000000-cc31a327d543.1 - buf.build/gen/go/astria/primitives/protocolbuffers/go v1.35.1-20240911152449-eeebd3decdce.1 - buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.35.1-20241017141511-71aab1871615.1 + buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-e09c7fd3fe26.1 + buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.35.2-00000000000000-e09c7fd3fe26.1 + buf.build/gen/go/astria/primitives/protocolbuffers/go v1.35.2-00000000000000-2f2e9ce53f59.1 + buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.35.2-00000000000000-0eda7df0ee38.1 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 github.com/Microsoft/go-winio v0.6.1 github.com/VictoriaMetrics/fastcache v1.12.1 @@ -15,7 +15,7 @@ require ( github.com/aws/aws-sdk-go-v2/credentials v1.13.43 github.com/aws/aws-sdk-go-v2/service/route53 v1.30.2 github.com/btcsuite/btcd/btcec/v2 v2.2.0 - github.com/btcsuite/btcd/btcutil v1.1.5 + github.com/btcsuite/btcd/btcutil v1.1.6 github.com/cespare/cp v0.1.0 github.com/cloudflare/cloudflare-go v0.79.0 github.com/cockroachdb/pebble v1.1.0 @@ -79,7 +79,7 @@ require ( golang.org/x/time v0.5.0 golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d google.golang.org/grpc v1.64.1 - google.golang.org/protobuf v1.35.1 + google.golang.org/protobuf v1.35.2 gopkg.in/natefinch/lumberjack.v2 v2.0.0 gopkg.in/yaml.v3 v3.0.1 ) @@ -101,6 +101,8 @@ require ( github.com/aws/smithy-go v1.15.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/bits-and-blooms/bitset v1.10.0 // indirect + github.com/btcsuite/btcd v0.24.2 // indirect + github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cockroachdb/errors v1.11.1 // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect diff --git a/go.sum b/go.sum index 7f7f15830..5ad9748dd 100644 --- a/go.sum +++ b/go.sum @@ -1,15 +1,23 @@ buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-cc31a327d543.1 h1:wOry49zAbse0G4mt2tFTwa4P2AUMuYCR/0mYcPrpcbs= buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-cc31a327d543.1/go.mod 
h1:+pVCkEpJNp2JtooS8NiydT7bO9+hu11XUZ5Z47DPtXo= +buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-e09c7fd3fe26.1 h1:gS4erruX5XeMN0MZ7xe4JmEIR3uCWrvzG5HGV725WiI= +buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-e09c7fd3fe26.1/go.mod h1:oXNLXPUVa006hXUuEk+z5isisNlEbrm0yS+XJeMj6u4= buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-20241017141511-7e4bcc0ebba5.1 h1:v7QnrDjNmG7I/0aqZdtlP3cBPQGd62w4AYVF8TfAcHM= buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-20241017141511-7e4bcc0ebba5.1/go.mod h1:T5EsLvEE5UMk62gVSwNY/7XlxknAP3sL8tYRsU68b4s= buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.35.1-00000000000000-cc31a327d543.1 h1:VkPk2LvyNK8NF9WmAnodrwgQZ3JiYAHFEmPKXUtlX4E= buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.35.1-00000000000000-cc31a327d543.1/go.mod h1:xzRLiRun3wTzhd+oBg9VkXi/c4PhjBjj73+2vSMH5eM= buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.35.1-20241017141511-7e4bcc0ebba5.1 h1:3G2O21DuY5Y/G32tP1mAI16AxwDYTscG2YaOb/WQty0= buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.35.1-20241017141511-7e4bcc0ebba5.1/go.mod h1:U4LUlabiYNYBd1pqYS9o8SsHjBRoEBysrfRVnebzJH0= +buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.35.2-00000000000000-e09c7fd3fe26.1 h1:Twi169wrd7ssCnK27Bymlytv5LmvwFV0zhKhJ64nCYM= +buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.35.2-00000000000000-e09c7fd3fe26.1/go.mod h1:PWzMbPHJ+Y31iNFrtSc5vy/wvm2805ZXyDZndzzFLa0= buf.build/gen/go/astria/primitives/protocolbuffers/go v1.35.1-20240911152449-eeebd3decdce.1 h1:kG4riHqlF9X6iZ1Oxs5/6ul6aue7MS+A6DK6HAchuTk= buf.build/gen/go/astria/primitives/protocolbuffers/go v1.35.1-20240911152449-eeebd3decdce.1/go.mod h1:n9L7X3VAj4od4VHf2ScJuHARUUQTSxJqtRHZk/7Ptt0= +buf.build/gen/go/astria/primitives/protocolbuffers/go v1.35.2-00000000000000-2f2e9ce53f59.1 h1:C1bT0G1In6Z6tBERd1XqwDjdxTK+PatSOJYlVk5Is60= +buf.build/gen/go/astria/primitives/protocolbuffers/go v1.35.2-00000000000000-2f2e9ce53f59.1/go.mod h1:I9FcB1oNqT1nI+ny0GD8gF9YrIYrHmczgNu6MTE9fAo= buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.35.1-20241017141511-71aab1871615.1 h1:hPMoxTiT7jJjnIbWqneBbL05VeVOTD9UeC/qdvzHL8g= buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.35.1-20241017141511-71aab1871615.1/go.mod h1:2uasRFMH+a3DaF34c1o+w7/YtYnoknmARyYpb9W2QIc= +buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.35.2-00000000000000-0eda7df0ee38.1 h1:uJm/22xugluY5AL2NkIDbNEFBxzN6UcI8vts/bGEDBs= +buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.35.2-00000000000000-0eda7df0ee38.1/go.mod h1:1Z9P18WNTOT+KvLlc0+2FkcBJ7l5eRUUFcnOxHmLeRA= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -113,6 +121,8 @@ github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13P github.com/btcsuite/btcd v0.22.0-beta.0.20220111032746-97732e52810c/go.mod h1:tjmYdS6MLJ5/s0Fj4DbLgSbDHbEqLJrtnHecBFkdz5M= github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd h1:js1gPwhcFflTZ7Nzl7WHaOTlTr5hIrR4n1NM4v9n4Kw= github.com/btcsuite/btcd v0.23.5-0.20231215221805-96c9fd8078fd/go.mod h1:nm3Bko6zh6bWP60UxwoT5LzdGJsQJaPo6HjduXq9p6A= +github.com/btcsuite/btcd v0.24.2 h1:aLmxPguqxza+4ag8R1I2nnJjSu2iFn/kqtHTIImswcY= +github.com/btcsuite/btcd v0.24.2/go.mod h1:5C8ChTkl5ejr3WHj8tkQSCmydiMEPB0ZhQhehpq7Dgg= 
github.com/btcsuite/btcd/btcec/v2 v2.1.0/go.mod h1:2VzYrv4Gm4apmbVVsSq5bqf1Ec8v56E48Vt0Y/umPgA= github.com/btcsuite/btcd/btcec/v2 v2.1.3/go.mod h1:ctjw4H1kknNJmRN4iP1R7bTQ+v3GJkZBd6mui8ZsAZE= github.com/btcsuite/btcd/btcec/v2 v2.2.0 h1:fzn1qaOt32TuLjFlkzYSsBC35Q3KUjT1SwPxiMSCF5k= @@ -121,6 +131,8 @@ github.com/btcsuite/btcd/btcutil v1.0.0/go.mod h1:Uoxwv0pqYWhD//tfTiipkxNfdhG9Ur github.com/btcsuite/btcd/btcutil v1.1.0/go.mod h1:5OapHB7A2hBBWLm48mmw4MOHNJCcUBTwmWH/0Jn8VHE= github.com/btcsuite/btcd/btcutil v1.1.5 h1:+wER79R5670vs/ZusMTF1yTcRYE5GUsFbdjdisflzM8= github.com/btcsuite/btcd/btcutil v1.1.5/go.mod h1:PSZZ4UitpLBWzxGd5VGOrLnmOjtPP/a6HaFo12zMs00= +github.com/btcsuite/btcd/btcutil v1.1.6 h1:zFL2+c3Lb9gEgqKNzowKUPQNb8jV7v5Oaodi/AYFd6c= +github.com/btcsuite/btcd/btcutil v1.1.6/go.mod h1:9dFymx8HpuLqBnsPELrImQeTQfKBQqzqGbbV3jK55aE= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ= @@ -543,11 +555,15 @@ github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobt github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4= @@ -905,6 +921,8 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= +google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/grpc/execution/server.go b/grpc/execution/server.go index 5befdad20..46009bee3 100644 --- 
a/grpc/execution/server.go +++ b/grpc/execution/server.go @@ -6,7 +6,6 @@ package execution import ( "context" - "crypto/ed25519" "crypto/sha256" "fmt" "github.com/ethereum/go-ethereum/eth" @@ -171,7 +170,9 @@ func (s *ExecutionServiceServerV1) ExecuteBlock(ctx context.Context, req *astria // the height that this block will be at height := s.Bc().CurrentBlock().Number.Uint64() + 1 - txsToProcess := shared.UnbundleRollupDataTransactions(req.Transactions, height, s.BridgeAddresses(), s.BridgeAllowedAssets(), prevHeadHash.Bytes(), s.TrustedBuilderPublicKey()) + addressPrefix := s.Bc().Config().AstriaSequencerAddressPrefix + + txsToProcess := shared.UnbundleRollupDataTransactions(req.Transactions, height, s.BridgeAddresses(), s.BridgeAllowedAssets(), prevHeadHash.Bytes(), s.TrustedBuilderPublicKey(), addressPrefix) // This set of ordered TXs on the TxPool is has been configured to be used by // the Miner when building a payload. @@ -221,6 +222,14 @@ func (s *ExecutionServiceServerV1) ExecuteBlock(ctx context.Context, req *astria s.SetNextFeeRecipient(next) } + if address, ok := s.Bc().Config().AstriaTrustedBuilderAddresses[res.Number+1]; ok { + if err := shared.ValidateBech32mAddress(address, addressPrefix); err != nil { + log.Error("trusted builder address is not a valid bech32 address", "block", res.Number+1, "address", address) + } + + s.SetTrustedBuilderPublicKey(address) + } + log.Info("ExecuteBlock completed", "block_num", res.Number, "timestamp", res.Timestamp) totalExecutedTxCount.Inc(int64(len(block.Transactions()))) executeBlockSuccessCount.Inc(1) @@ -432,6 +441,10 @@ func (s *ExecutionServiceServerV1) SyncMethodsCalled() bool { return s.sharedServiceContainer.SyncMethodsCalled() } -func (s *ExecutionServiceServerV1) TrustedBuilderPublicKey() ed25519.PublicKey { +func (s *ExecutionServiceServerV1) TrustedBuilderPublicKey() string { return s.sharedServiceContainer.TrustedBuilderPublicKey() } + +func (s *ExecutionServiceServerV1) SetTrustedBuilderPublicKey(trustedBuilderPublicKey string) { + s.sharedServiceContainer.SetTrustedBuilderPublicKey(trustedBuilderPublicKey) +} diff --git a/grpc/execution/server_test.go b/grpc/execution/server_test.go index 08a653339..211be906b 100644 --- a/grpc/execution/server_test.go +++ b/grpc/execution/server_test.go @@ -22,7 +22,7 @@ import ( ) func TestExecutionServiceV1_GetGenesisInfo(t *testing.T) { - ethservice, sharedServiceContainer, _ := shared.SetupSharedService(t, 10) + ethservice, sharedServiceContainer, _, _ := shared.SetupSharedService(t, 10) serviceV1 := SetupExecutionService(t, sharedServiceContainer) genesisInfo, err := serviceV1.GetGenesisInfo(context.Background(), &astriaPb.GetGenesisInfoRequest{}) @@ -37,7 +37,7 @@ func TestExecutionServiceV1_GetGenesisInfo(t *testing.T) { } func TestExecutionServiceServerV1_GetCommitmentState(t *testing.T) { - ethservice, sharedServiceContainer, _ := shared.SetupSharedService(t, 10) + ethservice, sharedServiceContainer, _, _ := shared.SetupSharedService(t, 10) serviceV1 := SetupExecutionService(t, sharedServiceContainer) commitmentState, err := serviceV1.GetCommitmentState(context.Background(), &astriaPb.GetCommitmentStateRequest{}) @@ -64,7 +64,7 @@ func TestExecutionServiceServerV1_GetCommitmentState(t *testing.T) { } func TestExecutionServiceV1_GetBlock(t *testing.T) { - ethservice, sharedServiceContainer, _ := shared.SetupSharedService(t, 10) + ethservice, sharedServiceContainer, _, _ := shared.SetupSharedService(t, 10) serviceV1 := SetupExecutionService(t, sharedServiceContainer) tests := []struct 
{ @@ -124,7 +124,7 @@ func TestExecutionServiceV1_GetBlock(t *testing.T) { } func TestExecutionServiceServerV1_BatchGetBlocks(t *testing.T) { - ethservice, sharedServiceContainer, _ := shared.SetupSharedService(t, 10) + ethservice, sharedServiceContainer, _, _ := shared.SetupSharedService(t, 10) serviceV1 := SetupExecutionService(t, sharedServiceContainer) tests := []struct { @@ -196,7 +196,7 @@ func TestExecutionServiceServerV1_BatchGetBlocks(t *testing.T) { } func TestExecutionServiceServerV1_ExecuteBlock(t *testing.T) { - ethservice, _, _ := shared.SetupSharedService(t, 10) + ethservice, _, _, _ := shared.SetupSharedService(t, 10) tests := []struct { description string @@ -248,7 +248,7 @@ func TestExecutionServiceServerV1_ExecuteBlock(t *testing.T) { for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { // reset the blockchain with each test - ethservice, sharedServiceContainer, _ := shared.SetupSharedService(t, 10) + ethservice, sharedServiceContainer, _, _ := shared.SetupSharedService(t, 10) serviceV1 := SetupExecutionService(t, sharedServiceContainer) var err error // adding this to prevent shadowing of genesisInfo in the below if branch @@ -342,7 +342,7 @@ func TestExecutionServiceServerV1_ExecuteBlock(t *testing.T) { } func TestExecutionServiceServerV1_ExecuteBlockAndUpdateCommitment(t *testing.T) { - ethservice, sharedServiceContainer, _ := shared.SetupSharedService(t, 10) + ethservice, sharedServiceContainer, _, _ := shared.SetupSharedService(t, 10) serviceV1 := SetupExecutionService(t, sharedServiceContainer) // call genesis info @@ -479,7 +479,7 @@ func TestExecutionServiceServerV1_ExecuteBlockAndUpdateCommitment(t *testing.T) // Check that invalid transactions are not added into a block and are removed from the mempool func TestExecutionServiceServerV1_ExecuteBlockAndUpdateCommitmentWithInvalidTransactions(t *testing.T) { - ethservice, sharedServiceContainer, _ := shared.SetupSharedService(t, 10) + ethservice, sharedServiceContainer, _, _ := shared.SetupSharedService(t, 10) serviceV1 := SetupExecutionService(t, sharedServiceContainer) // call genesis info diff --git a/grpc/optimistic/server.go b/grpc/optimistic/server.go index 92ebc2d63..8718db269 100644 --- a/grpc/optimistic/server.go +++ b/grpc/optimistic/server.go @@ -5,7 +5,6 @@ import ( optimsticPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/bundle/v1alpha1" astriaPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/execution/v1" "context" - "crypto/ed25519" "errors" "fmt" "github.com/ethereum/go-ethereum/beacon/engine" @@ -162,10 +161,7 @@ func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlock(ctx context.Context, executionStart := time.Now() defer executionOptimisticBlockTimer.UpdateSince(executionStart) - o.CommitmentUpdateLock().Lock() - // get the soft block softBlock := o.Bc().CurrentSafeBlock() - o.CommitmentUpdateLock().Unlock() o.BlockExecutionLock().Lock() nextFeeRecipient := o.NextFeeRecipient() @@ -174,7 +170,9 @@ func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlock(ctx context.Context, // the height that this block will be at height := o.Bc().CurrentBlock().Number.Uint64() + 1 - txsToProcess := shared.UnbundleRollupDataTransactions(req.Transactions, height, o.BridgeAddresses(), o.BridgeAllowedAssets(), softBlock.Hash().Bytes(), o.TrustedBuilderPublicKey()) + addressPrefix := o.Bc().Config().AstriaSequencerAddressPrefix + + txsToProcess := shared.UnbundleRollupDataTransactions(req.Transactions, height, o.BridgeAddresses(), o.BridgeAllowedAssets(), 
softBlock.Hash().Bytes(), o.TrustedBuilderPublicKey(), addressPrefix) // Build a payload to add to the chain payloadAttributes := &miner.BuildPayloadArgs{ @@ -218,14 +216,6 @@ func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlock(ctx context.Context, }, } - if publicKey, ok := o.Bc().Config().AstriaTrustedBuilderPublicKeys[res.Number+1]; ok { - if len(publicKey) != ed25519.PublicKeySize { - log.Error("trusted builder public key is not a valid ed25519 public", "block", res.Number+1, "publicKey", publicKey) - } - - o.SetTrustedBuilderPublicKey(ed25519.PublicKey(publicKey)) - } - log.Info("ExecuteOptimisticBlock completed", "block_num", res.Number, "timestamp", res.Timestamp) executeOptimisticBlockSuccessCount.Inc(1) @@ -284,10 +274,6 @@ func (s *OptimisticServiceV1Alpha1) SyncMethodsCalled() bool { return s.sharedServiceContainer.SyncMethodsCalled() } -func (s *OptimisticServiceV1Alpha1) TrustedBuilderPublicKey() ed25519.PublicKey { +func (s *OptimisticServiceV1Alpha1) TrustedBuilderPublicKey() string { return s.sharedServiceContainer.TrustedBuilderPublicKey() } - -func (s *OptimisticServiceV1Alpha1) SetTrustedBuilderPublicKey(trustedBuilderPublicKey ed25519.PublicKey) { - s.sharedServiceContainer.SetTrustedBuilderPublicKey(trustedBuilderPublicKey) -} diff --git a/grpc/optimistic/server_test.go b/grpc/optimistic/server_test.go index 4b8141b71..ff2359520 100644 --- a/grpc/optimistic/server_test.go +++ b/grpc/optimistic/server_test.go @@ -24,7 +24,7 @@ import ( ) func TestOptimisticServiceServerV1Alpha1_ExecuteOptimisticBlock(t *testing.T) { - ethService, _, _ := shared.SetupSharedService(t, 10) + ethService, _, _, _ := shared.SetupSharedService(t, 10) tests := []struct { description string @@ -66,7 +66,7 @@ func TestOptimisticServiceServerV1Alpha1_ExecuteOptimisticBlock(t *testing.T) { for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { - ethservice, sharedService, _ := shared.SetupSharedService(t, 10) + ethservice, sharedService, _, _ := shared.SetupSharedService(t, 10) // reset the blockchain with each test optimisticServiceV1Alpha1 := SetupOptimisticService(t, sharedService) @@ -194,7 +194,7 @@ func TestOptimisticServiceServerV1Alpha1_ExecuteOptimisticBlock(t *testing.T) { } func TestNewOptimisticServiceServerV1Alpha_StreamBundles(t *testing.T) { - ethservice, sharedService, _ := shared.SetupSharedService(t, 10) + ethservice, sharedService, _, _ := shared.SetupSharedService(t, 10) optimisticServiceV1Alpha1 := SetupOptimisticService(t, sharedService) executionServiceV1 := execution.SetupExecutionService(t, sharedService) @@ -358,7 +358,7 @@ func TestNewOptimisticServiceServerV1Alpha_StreamBundles(t *testing.T) { } func TestOptimisticServiceServerV1_StreamExecuteOptimisticBlock(t *testing.T) { - ethservice, sharedService, _ := shared.SetupSharedService(t, 10) + ethservice, sharedService, _, _ := shared.SetupSharedService(t, 10) optimisticServiceV1Alpha1 := SetupOptimisticService(t, sharedService) executionServiceV1 := execution.SetupExecutionService(t, sharedService) diff --git a/grpc/shared/bech32m.go b/grpc/shared/bech32m.go new file mode 100644 index 000000000..f4c5237ac --- /dev/null +++ b/grpc/shared/bech32m.go @@ -0,0 +1,92 @@ +package shared + +// Copied from astria-cli-go bech32m module (https://github.com/astriaorg/astria-cli-go/blob/d5ef82f718325b2907634c108d42b503211c20e6/modules/bech32m/bech32m.go#L1) +// TODO: organize the bech32m usage throughout the codebase + +import ( + "crypto/ed25519" + "crypto/sha256" + "fmt" + + 
"github.com/btcsuite/btcd/btcutil/bech32" +) + +type Address struct { + address string + prefix string + bytes [20]byte +} + +// String returns the bech32m address as a string +func (a *Address) String() string { + return a.address +} + +// Prefix returns the prefix of the bech32m address +func (a *Address) Prefix() string { + return a.prefix +} + +// Bytes returns the underlying bytes for the bech32m address as a [20]byte array +func (a *Address) Bytes() [20]byte { + return a.bytes +} + +// ValidateBech32mAddress verifies that a string in a valid bech32m address. It +// will return nil if the address is valid, otherwise it will return an error. +func ValidateBech32mAddress(address string, intendedPrefix string) error { + prefix, byteAddress, version, err := bech32.DecodeGeneric(address) + if err != nil { + return fmt.Errorf("address must be a bech32 encoded string") + } + if version != bech32.VersionM { + return fmt.Errorf("address must be a bech32m address") + } + byteAddress, err = bech32.ConvertBits(byteAddress, 5, 8, false) + if err != nil { + return fmt.Errorf("failed to convert address to 8 bit") + } + if prefix == "" { + return fmt.Errorf("address must have prefix") + } + if prefix != intendedPrefix { + return fmt.Errorf("address must have prefix %s", intendedPrefix) + } + + if len(byteAddress) != 20 { + return fmt.Errorf("address must decode to a 20 length byte array: got len %d", len(byteAddress)) + } + + return nil +} + +// EncodeFromBytes creates a *Address from a [20]byte array and string +// prefix. +func EncodeFromBytes(prefix string, data [20]byte) (string, error) { + // Convert the data from 8-bit groups to 5-bit + convertedBytes, err := bech32.ConvertBits(data[:], 8, 5, true) + if err != nil { + return "", fmt.Errorf("failed to convert bits from 8-bit groups to 5-bit groups: %v", err) + } + + // Encode the data as bech32m + address, err := bech32.EncodeM(prefix, convertedBytes) + if err != nil { + return "", fmt.Errorf("failed to encode address as bech32m: %v", err) + } + + return address, nil +} + +// EncodeFromPublicKey takes an ed25519 public key and string prefix and encodes +// them into a *Address. +func EncodeFromPublicKey(prefix string, pubkey ed25519.PublicKey) (string, error) { + hash := sha256.Sum256(pubkey) + var addr [20]byte + copy(addr[:], hash[:20]) + address, err := EncodeFromBytes(prefix, addr) + if err != nil { + return "", err + } + return address, nil +} diff --git a/grpc/shared/container.go b/grpc/shared/container.go index 320eeeda6..06dcbe56a 100644 --- a/grpc/shared/container.go +++ b/grpc/shared/container.go @@ -1,14 +1,13 @@ package shared import ( - "crypto/ed25519" - "errors" "fmt" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" "github.com/ethereum/go-ethereum/eth" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" + "github.com/pkg/errors" "sync" "sync/atomic" ) @@ -26,7 +25,8 @@ type SharedServiceContainer struct { bridgeAddresses map[string]*params.AstriaBridgeAddressConfig // astria bridge addess to config for that bridge account bridgeAllowedAssets map[string]struct{} // a set of allowed asset IDs structs are left empty - trustedBuilderPublicKey atomic.Pointer[ed25519.PublicKey] + // trusted builder public key is a bech32m address + trustedBuilderPublicKey atomic.Pointer[string] // TODO: bharath - we could make this an atomic pointer??? 
nextFeeRecipient common.Address // Fee recipient for the next block @@ -101,19 +101,19 @@ func NewSharedServiceContainer(eth *eth.Ethereum) (*SharedServiceContainer, erro } } - trustedBuilderBlockMap := bc.Config().AstriaTrustedBuilderPublicKeys - trustedBuilderPublicKey := ed25519.PublicKey{} + trustedBuilderBlockMap := bc.Config().AstriaTrustedBuilderAddresses + trustedBuilderPublicKey := "" if trustedBuilderBlockMap == nil { return nil, errors.New("trusted builder public keys not set") } else { maxHeightCollectorMatch := uint32(0) - for height, publicKey := range trustedBuilderBlockMap { + for height, address := range trustedBuilderBlockMap { if height <= nextBlock && height > maxHeightCollectorMatch { maxHeightCollectorMatch = height - if len(publicKey) != ed25519.PublicKeySize { - return nil, errors.New("trusted builder public key is not a valid ed25519 public key") + if err := ValidateBech32mAddress(address, bc.Config().AstriaSequencerAddressPrefix); err != nil { + return nil, errors.Wrapf(err, "trusted builder address %s at height %d is invalid", address, height) } - trustedBuilderPublicKey = ed25519.PublicKey(publicKey) + trustedBuilderPublicKey = address } } } @@ -184,10 +184,10 @@ func (s *SharedServiceContainer) BridgeAllowedAssets() map[string]struct{} { return s.bridgeAllowedAssets } -func (s *SharedServiceContainer) TrustedBuilderPublicKey() ed25519.PublicKey { +func (s *SharedServiceContainer) TrustedBuilderPublicKey() string { return *s.trustedBuilderPublicKey.Load() } -func (s *SharedServiceContainer) SetTrustedBuilderPublicKey(newPublicKey ed25519.PublicKey) { - s.trustedBuilderPublicKey.Store(&newPublicKey) +func (s *SharedServiceContainer) SetTrustedBuilderPublicKey(newAddress string) { + s.trustedBuilderPublicKey.Store(&newAddress) } diff --git a/grpc/shared/test_setup.go b/grpc/shared/test_setup.go index ead5ababd..d916865b6 100644 --- a/grpc/shared/test_setup.go +++ b/grpc/shared/test_setup.go @@ -8,9 +8,9 @@ import ( "testing" ) -func SetupSharedService(t *testing.T, noOfBlocksToGenerate int) (*eth.Ethereum, *SharedServiceContainer, ed25519.PrivateKey) { +func SetupSharedService(t *testing.T, noOfBlocksToGenerate int) (*eth.Ethereum, *SharedServiceContainer, ed25519.PrivateKey, ed25519.PublicKey) { t.Helper() - genesis, blocks, bridgeAddress, feeCollectorKey, trustedBuilderPrivkey := GenerateMergeChain(noOfBlocksToGenerate, true) + genesis, blocks, bridgeAddress, feeCollectorKey, trustedBuilderPrivkey, trustedBuilderPubKey := GenerateMergeChain(noOfBlocksToGenerate, true) ethservice := StartEthService(t, genesis) sharedService, err := NewSharedServiceContainer(ethservice) @@ -32,5 +32,5 @@ func SetupSharedService(t *testing.T, noOfBlocksToGenerate int) (*eth.Ethereum, // FIXME - this interface isn't right for the tests, we shouldn't be exposing the trusted builder priv key like this // we should instead allow the test to create it and pass it to the shared service container in the constructor // but that can make the codebase a bit weird, so we can leave it like this for now - return ethservice, sharedService, trustedBuilderPrivkey + return ethservice, sharedService, trustedBuilderPrivkey, trustedBuilderPubKey } diff --git a/grpc/shared/test_utils.go b/grpc/shared/test_utils.go index bcf87697c..3031d6e36 100644 --- a/grpc/shared/test_utils.go +++ b/grpc/shared/test_utils.go @@ -37,7 +37,7 @@ var ( testBalance = big.NewInt(2e18) ) -func GenerateMergeChain(n int, merged bool) (*core.Genesis, []*types.Block, string, *ecdsa.PrivateKey, ed25519.PrivateKey) { +func 
GenerateMergeChain(n int, merged bool) (*core.Genesis, []*types.Block, string, *ecdsa.PrivateKey, ed25519.PrivateKey, ed25519.PublicKey) { config := *params.AllEthashProtocolChanges engine := consensus.Engine(beaconConsensus.New(ethash.NewFaker())) if merged { @@ -56,19 +56,23 @@ func GenerateMergeChain(n int, merged bool) (*core.Genesis, []*types.Block, stri panic(err) } - trustedBuilderPubkey, trustedBuilderPrivkey, err := ed25519.GenerateKey(nil) - if err != nil { - panic(err) - } - config.AstriaRollupName = "astria" config.AstriaSequencerAddressPrefix = "astria" config.AstriaSequencerInitialHeight = 10 config.AstriaCelestiaInitialHeight = 10 config.AstriaCelestiaHeightVariance = 10 - config.AstriaTrustedBuilderPublicKeys = make(map[uint32]string) - config.AstriaTrustedBuilderPublicKeys[1] = string(trustedBuilderPubkey) + trustedBuilderPubkey, trustedBuilderPrivkey, err := ed25519.GenerateKey(nil) + if err != nil { + panic(err) + } + trustedBuilderAddress, err := EncodeFromPublicKey(config.AstriaSequencerAddressPrefix, trustedBuilderPubkey) + if err != nil { + panic(err) + } + + config.AstriaTrustedBuilderAddresses = make(map[uint32]string) + config.AstriaTrustedBuilderAddresses[1] = trustedBuilderAddress bech32mBridgeAddress, err := bech32.EncodeM(config.AstriaSequencerAddressPrefix, bridgeAddressBytes) if err != nil { @@ -123,7 +127,7 @@ func GenerateMergeChain(n int, merged bool) (*core.Genesis, []*types.Block, stri config.TerminalTotalDifficulty = totalDifficulty } - return genesis, blocks, bech32mBridgeAddress, feeCollectorKey, trustedBuilderPrivkey + return genesis, blocks, bech32mBridgeAddress, feeCollectorKey, trustedBuilderPrivkey, trustedBuilderPubkey } // startEthService creates a full node instance for testing. diff --git a/grpc/shared/validation.go b/grpc/shared/validation.go index 14317c81c..6d00f9f32 100644 --- a/grpc/shared/validation.go +++ b/grpc/shared/validation.go @@ -113,31 +113,35 @@ func validateAndUnmarshallSequenceAction(tx *sequencerblockv1.RollupData) (*type return ethTx, nil } -func unmarshallAuctionResultTxs(auctionResult *bundlev1alpha1.AuctionResult, prevBlockHash []byte, trustedBuilderPubKey ed25519.PublicKey) (types.Transactions, error) { +func unmarshallAllocationTxs(allocation *bundlev1alpha1.Allocation, prevBlockHash []byte, trustedBuilderBech32Address string, addressPrefix string) (types.Transactions, error) { processedTxs := types.Transactions{} - allocation := auctionResult.GetAllocation() + payload := allocation.GetPayload() - if !bytes.Equal(allocation.PrevRollupBlockHash, prevBlockHash) { + if !bytes.Equal(payload.PrevRollupBlockHash, prevBlockHash) { return nil, errors.New("prev block hash do not match in allocation") } - message, err := proto.Marshal(auctionResult.GetAllocation()) + publicKey := ed25519.PublicKey(allocation.GetPublicKey()) + bech32Address, err := EncodeFromPublicKey(addressPrefix, publicKey) if err != nil { - return nil, errors.Wrap(err, "failed to marshal allocation") + return nil, errors.Wrapf(err, "failed to encode public key to bech32m address: %s", publicKey) + } + if trustedBuilderBech32Address != bech32Address { + return nil, errors.Errorf("public key in allocation does not match trusted builder public key. 
expected: %s, got: %s", trustedBuilderBech32Address, bech32Address) } - publicKey := ed25519.PublicKey(auctionResult.GetPublicKey()) - if !trustedBuilderPubKey.Equal(publicKey) { - return nil, errors.New("public key in auction result does not match trusted builder public key") + message, err := proto.Marshal(allocation.GetPayload()) + if err != nil { + return nil, errors.Wrap(err, "failed to marshal allocation") } - signature := auctionResult.GetSignature() + signature := allocation.GetSignature() if !ed25519.Verify(publicKey, message, signature) { return nil, errors.New("failed to verify signature") } // unmarshall the transactions in the bundle - for _, allocationTx := range allocation.GetTransactions() { + for _, allocationTx := range payload.GetTransactions() { ethtx := new(types.Transaction) err := ethtx.UnmarshalBinary(allocationTx) if err != nil { @@ -151,12 +155,13 @@ func unmarshallAuctionResultTxs(auctionResult *bundlev1alpha1.AuctionResult, pre } // `UnbundleRollupDataTransactions` takes in a list of rollup data transactions and returns a list of Ethereum transactions. +// TODO - this function has become too big. we should start breaking it down func UnbundleRollupDataTransactions(txs []*sequencerblockv1.RollupData, height uint64, bridgeAddresses map[string]*params.AstriaBridgeAddressConfig, - bridgeAllowedAssets map[string]struct{}, prevBlockHash []byte, trustedBuilderPubKey ed25519.PublicKey) types.Transactions { + bridgeAllowedAssets map[string]struct{}, prevBlockHash []byte, trustedBuilderBech32Address string, addressPrefix string) types.Transactions { processedTxs := types.Transactions{} - auctionResultTxs := types.Transactions{} - // we just return the auction result here and do not unmarshall the transactions in the bundle if we find it - var auctionResult *bundlev1alpha1.AuctionResult + allocationTxs := types.Transactions{} + // we just return the allocation here and do not unmarshall the transactions in the bundle if we find it + var allocation *bundlev1alpha1.Allocation for _, tx := range txs { if deposit := tx.GetDeposit(); deposit != nil { depositTx, err := validateAndUnmarshalDepositTx(deposit, height, bridgeAddresses, bridgeAllowedAssets) @@ -168,20 +173,20 @@ func UnbundleRollupDataTransactions(txs []*sequencerblockv1.RollupData, height u processedTxs = append(processedTxs, depositTx) } else { sequenceData := tx.GetSequencedData() - // check if sequence data is of type AuctionResult - if auctionResult == nil { + // check if sequence data is of type Allocation + if allocation == nil { // TODO - check if we can avoid a temp value - tempAuctionResult := &bundlev1alpha1.AuctionResult{} - err := proto.Unmarshal(sequenceData, tempAuctionResult) + tempAllocation := &bundlev1alpha1.Allocation{} + err := proto.Unmarshal(sequenceData, tempAllocation) if err == nil { - unmarshalledAuctionResultTxs, err := unmarshallAuctionResultTxs(tempAuctionResult, prevBlockHash, trustedBuilderPubKey) + unmarshalledAllocationTxs, err := unmarshallAllocationTxs(tempAllocation, prevBlockHash, trustedBuilderBech32Address, addressPrefix) if err != nil { - log.Error("failed to unmarshall auction result transactions", "error", err) + log.Error("failed to unmarshall allocation transactions", "error", err) continue } - auctionResult = tempAuctionResult - auctionResultTxs = unmarshalledAuctionResultTxs + allocation = tempAllocation + allocationTxs = unmarshalledAllocationTxs } else { ethtx, err := validateAndUnmarshallSequenceAction(tx) if err != nil { @@ -201,8 +206,8 @@ func 
UnbundleRollupDataTransactions(txs []*sequencerblockv1.RollupData, height u } } - // prepend auctionResultTxs to processedTxs - processedTxs = append(auctionResultTxs, processedTxs...) + // prepend allocation txs to processedTxs + processedTxs = append(allocationTxs, processedTxs...) return processedTxs } diff --git a/grpc/shared/validation_test.go b/grpc/shared/validation_test.go index 71055ef53..734fcc249 100644 --- a/grpc/shared/validation_test.go +++ b/grpc/shared/validation_test.go @@ -72,8 +72,9 @@ func generateBech32MAddress() string { return bech32m } -func TestUnmarshallAuctionResultTxs(t *testing.T) { - _, serviceV1Alpha1, trustedBuilderPrivKey := SetupSharedService(t, 10) +func TestUnmarshallAllocationTxs(t *testing.T) { + ethService, serviceV1Alpha1, trustedBuilderPrivKey, trustedBuilderPubKey := SetupSharedService(t, 10) + addressPrefix := ethService.BlockChain().Config().AstriaSequencerAddressPrefix tx1 := transaction(0, 1000, TestKey) validMarshalledTx1, err := tx1.MarshalBinary() @@ -87,15 +88,15 @@ func TestUnmarshallAuctionResultTxs(t *testing.T) { validMarshalledTx3, err := tx3.MarshalBinary() require.NoError(t, err, "failed to marshal valid tx: %v", err) - validAllocation := &bundlev1alpha1.Bundle{ + validPayload := &bundlev1alpha1.Bundle{ Fee: 100, Transactions: [][]byte{validMarshalledTx1, validMarshalledTx2, validMarshalledTx3}, BaseSequencerBlockHash: []byte("sequencer block hash"), PrevRollupBlockHash: []byte("prev rollup block hash"), } - marshalledAllocation, err := proto.Marshal(validAllocation) - require.NoError(t, err, "failed to marshal allocation: %v", err) + marshalledAllocation, err := proto.Marshal(validPayload) + require.NoError(t, err, "failed to marshal payload: %v", err) signedAllocation, err := trustedBuilderPrivKey.Sign(nil, marshalledAllocation, &ed25519.Options{ Hash: 0, @@ -105,7 +106,7 @@ func TestUnmarshallAuctionResultTxs(t *testing.T) { tests := []struct { description string - auctionResult *bundlev1alpha1.AuctionResult + allocation *bundlev1alpha1.Allocation prevBlockHash []byte expectedOutput types.Transactions // just check if error contains the string since error contains other details @@ -113,11 +114,11 @@ func TestUnmarshallAuctionResultTxs(t *testing.T) { }{ { description: "previous block hash mismatch", - auctionResult: &bundlev1alpha1.AuctionResult{ + allocation: &bundlev1alpha1.Allocation{ // TODO - add signature and public key validation Signature: make([]byte, 0), PublicKey: make([]byte, 0), - Allocation: &bundlev1alpha1.Bundle{ + Payload: &bundlev1alpha1.Bundle{ Fee: 100, Transactions: [][]byte{[]byte("unmarshallable tx")}, BaseSequencerBlockHash: []byte("sequencer block hash"), @@ -130,10 +131,10 @@ func TestUnmarshallAuctionResultTxs(t *testing.T) { }, { description: "public key doesn't match", - auctionResult: &bundlev1alpha1.AuctionResult{ + allocation: &bundlev1alpha1.Allocation{ Signature: []byte("invalid signature"), PublicKey: []byte("invalid public key"), - Allocation: &bundlev1alpha1.Bundle{ + Payload: &bundlev1alpha1.Bundle{ Fee: 100, Transactions: [][]byte{[]byte("unmarshallable tx")}, BaseSequencerBlockHash: []byte("sequencer block hash"), @@ -142,14 +143,14 @@ func TestUnmarshallAuctionResultTxs(t *testing.T) { }, prevBlockHash: []byte("prev rollup block hash"), expectedOutput: types.Transactions{}, - wantErr: "public key in auction result does not match trusted builder public key", + wantErr: "public key in allocation does not match trusted builder public key", }, { description: "invalid signature", - 
auctionResult: &bundlev1alpha1.AuctionResult{ + allocation: &bundlev1alpha1.Allocation{ Signature: []byte("invalid signature"), - PublicKey: serviceV1Alpha1.TrustedBuilderPublicKey(), - Allocation: &bundlev1alpha1.Bundle{ + PublicKey: trustedBuilderPubKey, + Payload: &bundlev1alpha1.Bundle{ Fee: 100, Transactions: [][]byte{[]byte("unmarshallable tx")}, BaseSequencerBlockHash: []byte("sequencer block hash"), @@ -161,11 +162,11 @@ func TestUnmarshallAuctionResultTxs(t *testing.T) { wantErr: "failed to verify signature", }, { - description: "valid auction result", - auctionResult: &bundlev1alpha1.AuctionResult{ - Signature: signedAllocation, - PublicKey: serviceV1Alpha1.TrustedBuilderPublicKey(), - Allocation: validAllocation, + description: "valid allocation", + allocation: &bundlev1alpha1.Allocation{ + Signature: signedAllocation, + PublicKey: trustedBuilderPubKey, + Payload: validPayload, }, prevBlockHash: []byte("prev rollup block hash"), expectedOutput: types.Transactions{tx1, tx2, tx3}, @@ -175,7 +176,7 @@ func TestUnmarshallAuctionResultTxs(t *testing.T) { for _, test := range tests { t.Run(test.description, func(t *testing.T) { - finalTxs, err := unmarshallAuctionResultTxs(test.auctionResult, test.prevBlockHash, serviceV1Alpha1.TrustedBuilderPublicKey()) + finalTxs, err := unmarshallAllocationTxs(test.allocation, test.prevBlockHash, serviceV1Alpha1.TrustedBuilderPublicKey(), addressPrefix) if test.wantErr == "" && err == nil { for _, tx := range test.expectedOutput { foundTx := false @@ -196,7 +197,7 @@ func TestUnmarshallAuctionResultTxs(t *testing.T) { } func TestValidateAndUnmarshallDepositTx(t *testing.T) { - ethservice, serviceV1Alpha1, _ := SetupSharedService(t, 10) + ethservice, serviceV1Alpha1, _, _ := SetupSharedService(t, 10) chainDestinationKey, err := crypto.GenerateKey() require.Nil(t, err, "failed to generate chain destination key: %v", err) @@ -368,7 +369,9 @@ func TestValidateAndUnmarshallSequenceAction(t *testing.T) { } func TestUnbundleRollupData(t *testing.T) { - ethservice, serviceV1Alpha1, trustedBuilderPrivKey := SetupSharedService(t, 10) + ethservice, serviceV1Alpha1, trustedBuilderPrivKey, trustedBuilderPubKey := SetupSharedService(t, 10) + + addressPrefix := ethservice.BlockChain().Config().AstriaSequencerAddressPrefix baseSequencerBlockHash := []byte("sequencer block hash") prevRollupBlockHash := []byte("prev rollup block hash") @@ -391,32 +394,32 @@ func TestUnbundleRollupData(t *testing.T) { validMarshalledTx5, err := tx5.MarshalBinary() require.NoError(t, err, "failed to marshal valid tx: %v", err) - allocation := &bundlev1alpha1.Bundle{ + payload := &bundlev1alpha1.Bundle{ Fee: 100, Transactions: [][]byte{validMarshalledTx1, validMarshalledTx2, validMarshalledTx3}, BaseSequencerBlockHash: baseSequencerBlockHash, PrevRollupBlockHash: prevRollupBlockHash, } - marshalledAllocation, err := proto.Marshal(allocation) - require.NoError(t, err, "failed to marshal allocation: %v", err) - signedAllocation, err := trustedBuilderPrivKey.Sign(nil, marshalledAllocation, &ed25519.Options{ + marshalledPayload, err := proto.Marshal(payload) + require.NoError(t, err, "failed to marshal payload: %v", err) + signedPayload, err := trustedBuilderPrivKey.Sign(nil, marshalledPayload, &ed25519.Options{ Hash: 0, Context: "", }) - require.NoError(t, err, "failed to sign allocation: %v", err) + require.NoError(t, err, "failed to sign payload: %v", err) - auctionResult := &bundlev1alpha1.AuctionResult{ - Signature: signedAllocation, - PublicKey: 
serviceV1Alpha1.TrustedBuilderPublicKey(), - Allocation: allocation, + allocation := &bundlev1alpha1.Allocation{ + Signature: signedPayload, + PublicKey: trustedBuilderPubKey, + Payload: payload, } - marshalledAuctionResult, err := proto.Marshal(auctionResult) - require.NoError(t, err, "failed to marshal auction result: %v", err) - auctionResultSequenceData := &sequencerblockv1.RollupData{ + marshalledAllocation, err := proto.Marshal(allocation) + require.NoError(t, err, "failed to marshal allocation: %v", err) + allocationSequenceData := &sequencerblockv1.RollupData{ Value: &sequencerblockv1.RollupData_SequencedData{ - SequencedData: marshalledAuctionResult, + SequencedData: marshalledAllocation, }, } seqData1 := &sequencerblockv1.RollupData{ @@ -450,13 +453,13 @@ func TestUnbundleRollupData(t *testing.T) { SourceActionIndex: 0, }}} - finalTxs := []*sequencerblockv1.RollupData{seqData1, seqData2, auctionResultSequenceData, depositTx} + finalTxs := []*sequencerblockv1.RollupData{seqData1, seqData2, allocationSequenceData, depositTx} - txsToProcess := UnbundleRollupDataTransactions(finalTxs, 2, serviceV1Alpha1.BridgeAddresses(), serviceV1Alpha1.BridgeAllowedAssets(), prevRollupBlockHash, serviceV1Alpha1.TrustedBuilderPublicKey()) + txsToProcess := UnbundleRollupDataTransactions(finalTxs, 2, serviceV1Alpha1.BridgeAddresses(), serviceV1Alpha1.BridgeAllowedAssets(), prevRollupBlockHash, serviceV1Alpha1.TrustedBuilderPublicKey(), addressPrefix) require.Equal(t, txsToProcess.Len(), 6, "expected 6 txs to process") - // auction result txs should be the first 3 + // allocation txs should be the first 3 require.True(t, bytes.Equal(txsToProcess[0].Hash().Bytes(), tx1.Hash().Bytes()), "expected tx1 to be first") require.True(t, bytes.Equal(txsToProcess[1].Hash().Bytes(), tx2.Hash().Bytes()), "expected tx2 to be second") require.True(t, bytes.Equal(txsToProcess[2].Hash().Bytes(), tx3.Hash().Bytes()), "expected tx3 to be third") @@ -464,8 +467,9 @@ func TestUnbundleRollupData(t *testing.T) { require.True(t, bytes.Equal(txsToProcess[4].Hash().Bytes(), tx5.Hash().Bytes()), "expected tx5 to be fifth") } -func TestUnbundleRollupDataWithDuplicateAuctionResults(t *testing.T) { - ethservice, serviceV1Alpha1, trustedBuilderPrivKey := SetupSharedService(t, 10) +func TestUnbundleRollupDataWithDuplicateAllocations(t *testing.T) { + ethservice, serviceV1Alpha1, trustedBuilderPrivKey, trustedBuilderPubKey := SetupSharedService(t, 10) + addressPrefix := ethservice.BlockChain().Config().AstriaSequencerAddressPrefix baseSequencerBlockHash := []byte("sequencer block hash") prevRollupBlockHash := []byte("prev rollup block hash") @@ -488,38 +492,38 @@ func TestUnbundleRollupDataWithDuplicateAuctionResults(t *testing.T) { validMarshalledTx5, err := tx5.MarshalBinary() require.NoError(t, err, "failed to marshal valid tx: %v", err) - allocation := &bundlev1alpha1.Bundle{ + payload := &bundlev1alpha1.Bundle{ Fee: 100, Transactions: [][]byte{validMarshalledTx1, validMarshalledTx2, validMarshalledTx3}, BaseSequencerBlockHash: baseSequencerBlockHash, PrevRollupBlockHash: prevRollupBlockHash, } - marshalledAllocation, err := proto.Marshal(allocation) - require.NoError(t, err, "failed to marshal allocation: %v", err) - signedAllocation, err := trustedBuilderPrivKey.Sign(nil, marshalledAllocation, &ed25519.Options{ + marshalledPayload, err := proto.Marshal(payload) + require.NoError(t, err, "failed to marshal payload: %v", err) + signedPayload, err := trustedBuilderPrivKey.Sign(nil, marshalledPayload, &ed25519.Options{ Hash: 0, 
Context: "", }) - require.NoError(t, err, "failed to sign allocation: %v", err) + require.NoError(t, err, "failed to sign payload: %v", err) - auctionResult := &bundlev1alpha1.AuctionResult{ - Signature: signedAllocation, - PublicKey: serviceV1Alpha1.TrustedBuilderPublicKey(), - Allocation: allocation, + allocation := &bundlev1alpha1.Allocation{ + Signature: signedPayload, + PublicKey: trustedBuilderPubKey, + Payload: payload, } - marshalledAuctionResult, err := proto.Marshal(auctionResult) - require.NoError(t, err, "failed to marshal auction result: %v", err) - auctionResultSequenceData := &sequencerblockv1.RollupData{ + marshalledAllocation, err := proto.Marshal(allocation) + require.NoError(t, err, "failed to marshal allocation: %v", err) + allocationSequenceData := &sequencerblockv1.RollupData{ Value: &sequencerblockv1.RollupData_SequencedData{ - SequencedData: marshalledAuctionResult, + SequencedData: marshalledAllocation, }, } - // this auction result should be ignored - auctionResultSequenceData2 := &sequencerblockv1.RollupData{ + // this allocation should be ignored + allocationSequenceData2 := &sequencerblockv1.RollupData{ Value: &sequencerblockv1.RollupData_SequencedData{ - SequencedData: marshalledAuctionResult, + SequencedData: marshalledAllocation, }, } seqData1 := &sequencerblockv1.RollupData{ @@ -553,13 +557,13 @@ func TestUnbundleRollupDataWithDuplicateAuctionResults(t *testing.T) { SourceActionIndex: 0, }}} - finalTxs := []*sequencerblockv1.RollupData{seqData1, seqData2, auctionResultSequenceData, auctionResultSequenceData2, depositTx} + finalTxs := []*sequencerblockv1.RollupData{seqData1, seqData2, allocationSequenceData, allocationSequenceData2, depositTx} - txsToProcess := UnbundleRollupDataTransactions(finalTxs, 2, serviceV1Alpha1.BridgeAddresses(), serviceV1Alpha1.BridgeAllowedAssets(), prevRollupBlockHash, serviceV1Alpha1.TrustedBuilderPublicKey()) + txsToProcess := UnbundleRollupDataTransactions(finalTxs, 2, serviceV1Alpha1.BridgeAddresses(), serviceV1Alpha1.BridgeAllowedAssets(), prevRollupBlockHash, serviceV1Alpha1.TrustedBuilderPublicKey(), addressPrefix) require.Equal(t, txsToProcess.Len(), 6, "expected 6 txs to process") - // auction result txs should be the first 3 + // allocation txs should be the first 3 require.True(t, bytes.Equal(txsToProcess[0].Hash().Bytes(), tx1.Hash().Bytes()), "expected tx1 to be first") require.True(t, bytes.Equal(txsToProcess[1].Hash().Bytes(), tx2.Hash().Bytes()), "expected tx2 to be second") require.True(t, bytes.Equal(txsToProcess[2].Hash().Bytes(), tx3.Hash().Bytes()), "expected tx3 to be third") @@ -567,8 +571,9 @@ func TestUnbundleRollupDataWithDuplicateAuctionResults(t *testing.T) { require.True(t, bytes.Equal(txsToProcess[4].Hash().Bytes(), tx5.Hash().Bytes()), "expected tx5 to be fifth") } -func TestUnbundleRollupDataWithDuplicateInvalidAuctionResults(t *testing.T) { - ethservice, serviceV1Alpha1, trustedBuilderPrivKey := SetupSharedService(t, 10) +func TestUnbundleRollupDataWithDuplicateInvalidAllocations(t *testing.T) { + ethservice, serviceV1Alpha1, trustedBuilderPrivKey, trustedBuilderPubKey := SetupSharedService(t, 10) + addressPrefix := ethservice.BlockChain().Config().AstriaSequencerAddressPrefix baseSequencerBlockHash := []byte("sequencer block hash") prevRollupBlockHash := []byte("prev rollup block hash") @@ -602,63 +607,63 @@ func TestUnbundleRollupDataWithDuplicateInvalidAuctionResults(t *testing.T) { invalidMarshalledTx2, err := invalidTx2.MarshalBinary() require.NoError(t, err, "failed to marshal valid tx: %v", 
err) - allocation := &bundlev1alpha1.Bundle{ + payload := &bundlev1alpha1.Bundle{ Fee: 100, Transactions: [][]byte{validMarshalledTx1, validMarshalledTx2, validMarshalledTx3}, BaseSequencerBlockHash: baseSequencerBlockHash, PrevRollupBlockHash: prevRollupBlockHash, } - marshalledAllocation, err := proto.Marshal(allocation) + marshalledPayload, err := proto.Marshal(payload) require.NoError(t, err, "failed to marshal allocation: %v", err) - signedAllocation, err := trustedBuilderPrivKey.Sign(nil, marshalledAllocation, &ed25519.Options{ + signedPayload, err := trustedBuilderPrivKey.Sign(nil, marshalledPayload, &ed25519.Options{ Hash: 0, Context: "", }) require.NoError(t, err, "failed to sign allocation: %v", err) - invalidAllocation := &bundlev1alpha1.Bundle{ + invalidPayload := &bundlev1alpha1.Bundle{ Fee: 100, Transactions: [][]byte{invalidMarshalledTx1, invalidMarshalledTx2}, BaseSequencerBlockHash: baseSequencerBlockHash, PrevRollupBlockHash: prevRollupBlockHash, } - marshalledInvalidAllocation, err := proto.Marshal(invalidAllocation) + marshalledInvalidPayload, err := proto.Marshal(invalidPayload) require.NoError(t, err, "failed to marshal invalid allocation: %v", err) - signedInvalidAllocation, err := invalidTrustedBuilderprivkey.Sign(nil, marshalledInvalidAllocation, &ed25519.Options{ + signedInvalidPayload, err := invalidTrustedBuilderprivkey.Sign(nil, marshalledInvalidPayload, &ed25519.Options{ Hash: 0, Context: "", }) require.NoError(t, err, "failed to sign allocation: %v", err) - auctionResult := &bundlev1alpha1.AuctionResult{ - Signature: signedAllocation, - PublicKey: serviceV1Alpha1.TrustedBuilderPublicKey(), - Allocation: allocation, + allocation := &bundlev1alpha1.Allocation{ + Signature: signedPayload, + PublicKey: trustedBuilderPubKey, + Payload: payload, } - marshalledAuctionResult, err := proto.Marshal(auctionResult) - require.NoError(t, err, "failed to marshal auction result: %v", err) + marshalledAllocation, err := proto.Marshal(allocation) + require.NoError(t, err, "failed to marshal allocation: %v", err) - invalidAuctionResult := &bundlev1alpha1.AuctionResult{ - Signature: signedInvalidAllocation, + invalidAllocation := &bundlev1alpha1.Allocation{ + Signature: signedInvalidPayload, // trying to spoof the actual trusted builder key - PublicKey: serviceV1Alpha1.TrustedBuilderPublicKey(), - Allocation: invalidAllocation, + PublicKey: trustedBuilderPubKey, + Payload: invalidPayload, } - marshalledInvalidAuctionResult, err := proto.Marshal(invalidAuctionResult) - require.NoError(t, err, "failed to marshal invalid auction result: %v", err) + marshalledInvalidAllocation, err := proto.Marshal(invalidAllocation) + require.NoError(t, err, "failed to marshal invalid allocation: %v", err) - auctionResultSequenceData := &sequencerblockv1.RollupData{ + allocationSequenceData := &sequencerblockv1.RollupData{ Value: &sequencerblockv1.RollupData_SequencedData{ - SequencedData: marshalledAuctionResult, + SequencedData: marshalledAllocation, }, } - // this auction result should be ignored - invalidAuctionResultSequenceData := &sequencerblockv1.RollupData{ + // this allocation should be ignored + invalidAllocationSequenceData := &sequencerblockv1.RollupData{ Value: &sequencerblockv1.RollupData_SequencedData{ - SequencedData: marshalledInvalidAuctionResult, + SequencedData: marshalledInvalidAllocation, }, } seqData1 := &sequencerblockv1.RollupData{ @@ -692,13 +697,13 @@ func TestUnbundleRollupDataWithDuplicateInvalidAuctionResults(t *testing.T) { SourceActionIndex: 0, }}} - finalTxs := 
[]*sequencerblockv1.RollupData{seqData1, seqData2, auctionResultSequenceData, invalidAuctionResultSequenceData, depositTx} + finalTxs := []*sequencerblockv1.RollupData{seqData1, seqData2, allocationSequenceData, invalidAllocationSequenceData, depositTx} - txsToProcess := UnbundleRollupDataTransactions(finalTxs, 2, serviceV1Alpha1.BridgeAddresses(), serviceV1Alpha1.BridgeAllowedAssets(), prevRollupBlockHash, serviceV1Alpha1.TrustedBuilderPublicKey()) + txsToProcess := UnbundleRollupDataTransactions(finalTxs, 2, serviceV1Alpha1.BridgeAddresses(), serviceV1Alpha1.BridgeAllowedAssets(), prevRollupBlockHash, serviceV1Alpha1.TrustedBuilderPublicKey(), addressPrefix) require.Equal(t, txsToProcess.Len(), 6, "expected 6 txs to process") - // auction result txs should be the first 3 + // allocation txs should be the first 3 require.True(t, bytes.Equal(txsToProcess[0].Hash().Bytes(), tx1.Hash().Bytes()), "expected tx1 to be first") require.True(t, bytes.Equal(txsToProcess[1].Hash().Bytes(), tx2.Hash().Bytes()), "expected tx2 to be second") require.True(t, bytes.Equal(txsToProcess[2].Hash().Bytes(), tx3.Hash().Bytes()), "expected tx3 to be third") diff --git a/params/config.go b/params/config.go index a28d1acfd..5783ef162 100644 --- a/params/config.go +++ b/params/config.go @@ -388,7 +388,7 @@ type ChainConfig struct { AstriaBridgeAddressConfigs []AstriaBridgeAddressConfig `json:"astriaBridgeAddresses,omitempty"` AstriaFeeCollectors map[uint32]common.Address `json:"astriaFeeCollectors"` AstriaEIP1559Params *AstriaEIP1559Params `json:"astriaEIP1559Params,omitempty"` - AstriaTrustedBuilderPublicKeys map[uint32]string `json:"astriaTrustedBuilderPublicKeys,omitempty"` + AstriaTrustedBuilderAddresses map[uint32]string `json:"astriaTrustedBuilderAddresses,omitempty"` } func (c *ChainConfig) AstriaExtraData() []byte { From 2992b5beb66378c5df952e49d133cc41d3f13346 Mon Sep 17 00:00:00 2001 From: Bharath Date: Thu, 21 Nov 2024 15:30:30 +0530 Subject: [PATCH 62/79] change trusted builder instances to auctioneer --- genesis.json | 2 +- grpc/execution/server.go | 16 ++++++------ grpc/optimistic/server.go | 6 ++--- grpc/shared/container.go | 28 ++++++++++----------- grpc/shared/test_setup.go | 6 ++--- grpc/shared/test_utils.go | 10 ++++---- grpc/shared/validation.go | 10 ++++---- grpc/shared/validation_test.go | 46 +++++++++++++++++----------------- params/config.go | 2 +- 9 files changed, 63 insertions(+), 63 deletions(-) diff --git a/genesis.json b/genesis.json index 62123f5ad..e65d980fb 100644 --- a/genesis.json +++ b/genesis.json @@ -40,7 +40,7 @@ } } ], - "astriaTrustedBuilderPublicKeys": { + "astriaAuctioneerAddresses": { "1": "" }, "astriaFeeCollectors": { diff --git a/grpc/execution/server.go b/grpc/execution/server.go index 46009bee3..49f12c7b4 100644 --- a/grpc/execution/server.go +++ b/grpc/execution/server.go @@ -172,7 +172,7 @@ func (s *ExecutionServiceServerV1) ExecuteBlock(ctx context.Context, req *astria addressPrefix := s.Bc().Config().AstriaSequencerAddressPrefix - txsToProcess := shared.UnbundleRollupDataTransactions(req.Transactions, height, s.BridgeAddresses(), s.BridgeAllowedAssets(), prevHeadHash.Bytes(), s.TrustedBuilderPublicKey(), addressPrefix) + txsToProcess := shared.UnbundleRollupDataTransactions(req.Transactions, height, s.BridgeAddresses(), s.BridgeAllowedAssets(), prevHeadHash.Bytes(), s.AuctioneerAddress(), addressPrefix) // This set of ordered TXs on the TxPool is has been configured to be used by // the Miner when building a payload. 
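Illustrative sketch (not part of this diff): the signer check that shared.UnbundleRollupDataTransactions applies, via unmarshallAllocationTxs in grpc/shared/validation.go, when it encounters an Allocation at this call site. It assumes the bech32m helpers from this series and the package's existing imports (crypto/ed25519, github.com/pkg/errors, google.golang.org/protobuf/proto); the function name is hypothetical.

func verifyAllocationSigner(allocation *bundlev1alpha1.Allocation, auctioneerBech32Address, addressPrefix string) error {
	// Derive the bech32m address of the key that signed the allocation.
	publicKey := ed25519.PublicKey(allocation.GetPublicKey())
	derivedAddress, err := EncodeFromPublicKey(addressPrefix, publicKey)
	if err != nil {
		return errors.Wrap(err, "failed to encode public key to bech32m address")
	}
	// The signer must be the auctioneer address configured for this height.
	if derivedAddress != auctioneerBech32Address {
		return errors.Errorf("allocation signed by %s, expected auctioneer %s", derivedAddress, auctioneerBech32Address)
	}
	// The signature covers the marshalled payload (the bundle of rollup transactions).
	message, err := proto.Marshal(allocation.GetPayload())
	if err != nil {
		return errors.Wrap(err, "failed to marshal allocation payload")
	}
	if !ed25519.Verify(publicKey, message, allocation.GetSignature()) {
		return errors.New("failed to verify allocation signature")
	}
	return nil
}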
@@ -222,12 +222,12 @@ func (s *ExecutionServiceServerV1) ExecuteBlock(ctx context.Context, req *astria s.SetNextFeeRecipient(next) } - if address, ok := s.Bc().Config().AstriaTrustedBuilderAddresses[res.Number+1]; ok { + if address, ok := s.Bc().Config().AstriaAuctioneerAddresses[res.Number+1]; ok { if err := shared.ValidateBech32mAddress(address, addressPrefix); err != nil { - log.Error("trusted builder address is not a valid bech32 address", "block", res.Number+1, "address", address) + log.Error("auctioneer address is not a valid bech32 address", "block", res.Number+1, "address", address) } - s.SetTrustedBuilderPublicKey(address) + s.SetAuctioneerAddress(address) } log.Info("ExecuteBlock completed", "block_num", res.Number, "timestamp", res.Timestamp) @@ -441,10 +441,10 @@ func (s *ExecutionServiceServerV1) SyncMethodsCalled() bool { return s.sharedServiceContainer.SyncMethodsCalled() } -func (s *ExecutionServiceServerV1) TrustedBuilderPublicKey() string { - return s.sharedServiceContainer.TrustedBuilderPublicKey() +func (s *ExecutionServiceServerV1) AuctioneerAddress() string { + return s.sharedServiceContainer.AuctioneerAddress() } -func (s *ExecutionServiceServerV1) SetTrustedBuilderPublicKey(trustedBuilderPublicKey string) { - s.sharedServiceContainer.SetTrustedBuilderPublicKey(trustedBuilderPublicKey) +func (s *ExecutionServiceServerV1) SetAuctioneerAddress(auctioneerAddress string) { + s.sharedServiceContainer.SetAuctioneerAddress(auctioneerAddress) } diff --git a/grpc/optimistic/server.go b/grpc/optimistic/server.go index 8718db269..20202fecc 100644 --- a/grpc/optimistic/server.go +++ b/grpc/optimistic/server.go @@ -172,7 +172,7 @@ func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlock(ctx context.Context, addressPrefix := o.Bc().Config().AstriaSequencerAddressPrefix - txsToProcess := shared.UnbundleRollupDataTransactions(req.Transactions, height, o.BridgeAddresses(), o.BridgeAllowedAssets(), softBlock.Hash().Bytes(), o.TrustedBuilderPublicKey(), addressPrefix) + txsToProcess := shared.UnbundleRollupDataTransactions(req.Transactions, height, o.BridgeAddresses(), o.BridgeAllowedAssets(), softBlock.Hash().Bytes(), o.AuctioneerAddress(), addressPrefix) // Build a payload to add to the chain payloadAttributes := &miner.BuildPayloadArgs{ @@ -274,6 +274,6 @@ func (s *OptimisticServiceV1Alpha1) SyncMethodsCalled() bool { return s.sharedServiceContainer.SyncMethodsCalled() } -func (s *OptimisticServiceV1Alpha1) TrustedBuilderPublicKey() string { - return s.sharedServiceContainer.TrustedBuilderPublicKey() +func (s *OptimisticServiceV1Alpha1) AuctioneerAddress() string { + return s.sharedServiceContainer.AuctioneerAddress() } diff --git a/grpc/shared/container.go b/grpc/shared/container.go index 06dcbe56a..bf5e23b78 100644 --- a/grpc/shared/container.go +++ b/grpc/shared/container.go @@ -25,8 +25,8 @@ type SharedServiceContainer struct { bridgeAddresses map[string]*params.AstriaBridgeAddressConfig // astria bridge addess to config for that bridge account bridgeAllowedAssets map[string]struct{} // a set of allowed asset IDs structs are left empty - // trusted builder public key is a bech32m address - trustedBuilderPublicKey atomic.Pointer[string] + // auctioneer address is a bech32m address + auctioneerAddress atomic.Pointer[string] // TODO: bharath - we could make this an atomic pointer??? 
nextFeeRecipient common.Address // Fee recipient for the next block @@ -101,19 +101,19 @@ func NewSharedServiceContainer(eth *eth.Ethereum) (*SharedServiceContainer, erro } } - trustedBuilderBlockMap := bc.Config().AstriaTrustedBuilderAddresses - trustedBuilderPublicKey := "" - if trustedBuilderBlockMap == nil { - return nil, errors.New("trusted builder public keys not set") + auctioneerAddressesBlockMap := bc.Config().AstriaAuctioneerAddresses + auctioneerAddress := "" + if auctioneerAddressesBlockMap == nil { + return nil, errors.New("auctioneer addresses not set") } else { maxHeightCollectorMatch := uint32(0) - for height, address := range trustedBuilderBlockMap { + for height, address := range auctioneerAddressesBlockMap { if height <= nextBlock && height > maxHeightCollectorMatch { maxHeightCollectorMatch = height if err := ValidateBech32mAddress(address, bc.Config().AstriaSequencerAddressPrefix); err != nil { - return nil, errors.Wrapf(err, "trusted builder address %s at height %d is invalid", address, height) + return nil, errors.Wrapf(err, "auctioneer address %s at height %d is invalid", address, height) } - trustedBuilderPublicKey = address + auctioneerAddress = address } } } @@ -126,7 +126,7 @@ func NewSharedServiceContainer(eth *eth.Ethereum) (*SharedServiceContainer, erro nextFeeRecipient: nextFeeRecipient, } - sharedServiceContainer.SetTrustedBuilderPublicKey(trustedBuilderPublicKey) + sharedServiceContainer.SetAuctioneerAddress(auctioneerAddress) return sharedServiceContainer, nil } @@ -184,10 +184,10 @@ func (s *SharedServiceContainer) BridgeAllowedAssets() map[string]struct{} { return s.bridgeAllowedAssets } -func (s *SharedServiceContainer) TrustedBuilderPublicKey() string { - return *s.trustedBuilderPublicKey.Load() +func (s *SharedServiceContainer) AuctioneerAddress() string { + return *s.auctioneerAddress.Load() } -func (s *SharedServiceContainer) SetTrustedBuilderPublicKey(newAddress string) { - s.trustedBuilderPublicKey.Store(&newAddress) +func (s *SharedServiceContainer) SetAuctioneerAddress(newAddress string) { + s.auctioneerAddress.Store(&newAddress) } diff --git a/grpc/shared/test_setup.go b/grpc/shared/test_setup.go index d916865b6..45078ea15 100644 --- a/grpc/shared/test_setup.go +++ b/grpc/shared/test_setup.go @@ -10,7 +10,7 @@ import ( func SetupSharedService(t *testing.T, noOfBlocksToGenerate int) (*eth.Ethereum, *SharedServiceContainer, ed25519.PrivateKey, ed25519.PublicKey) { t.Helper() - genesis, blocks, bridgeAddress, feeCollectorKey, trustedBuilderPrivkey, trustedBuilderPubKey := GenerateMergeChain(noOfBlocksToGenerate, true) + genesis, blocks, bridgeAddress, feeCollectorKey, auctioneerPrivKey, auctioneerPubKey := GenerateMergeChain(noOfBlocksToGenerate, true) ethservice := StartEthService(t, genesis) sharedService, err := NewSharedServiceContainer(ethservice) @@ -29,8 +29,8 @@ func SetupSharedService(t *testing.T, noOfBlocksToGenerate int) (*eth.Ethereum, _, err = ethservice.BlockChain().InsertChain(blocks) require.Nil(t, err, "can't insert blocks") - // FIXME - this interface isn't right for the tests, we shouldn't be exposing the trusted builder priv key like this + // FIXME - this interface isn't right for the tests, we shouldn't be exposing the auctioneer priv key like this // we should instead allow the test to create it and pass it to the shared service container in the constructor // but that can make the codebase a bit weird, so we can leave it like this for now - return ethservice, sharedService, trustedBuilderPrivkey, trustedBuilderPubKey + 
return ethservice, sharedService, auctioneerPrivKey, auctioneerPubKey } diff --git a/grpc/shared/test_utils.go b/grpc/shared/test_utils.go index 3031d6e36..69926120f 100644 --- a/grpc/shared/test_utils.go +++ b/grpc/shared/test_utils.go @@ -62,17 +62,17 @@ func GenerateMergeChain(n int, merged bool) (*core.Genesis, []*types.Block, stri config.AstriaCelestiaInitialHeight = 10 config.AstriaCelestiaHeightVariance = 10 - trustedBuilderPubkey, trustedBuilderPrivkey, err := ed25519.GenerateKey(nil) + auctioneerPubKey, auctioneerPrivKey, err := ed25519.GenerateKey(nil) if err != nil { panic(err) } - trustedBuilderAddress, err := EncodeFromPublicKey(config.AstriaSequencerAddressPrefix, trustedBuilderPubkey) + auctioneerAddress, err := EncodeFromPublicKey(config.AstriaSequencerAddressPrefix, auctioneerPubKey) if err != nil { panic(err) } - config.AstriaTrustedBuilderAddresses = make(map[uint32]string) - config.AstriaTrustedBuilderAddresses[1] = trustedBuilderAddress + config.AstriaAuctioneerAddresses = make(map[uint32]string) + config.AstriaAuctioneerAddresses[1] = auctioneerAddress bech32mBridgeAddress, err := bech32.EncodeM(config.AstriaSequencerAddressPrefix, bridgeAddressBytes) if err != nil { @@ -127,7 +127,7 @@ func GenerateMergeChain(n int, merged bool) (*core.Genesis, []*types.Block, stri config.TerminalTotalDifficulty = totalDifficulty } - return genesis, blocks, bech32mBridgeAddress, feeCollectorKey, trustedBuilderPrivkey, trustedBuilderPubkey + return genesis, blocks, bech32mBridgeAddress, feeCollectorKey, auctioneerPrivKey, auctioneerPubKey } // startEthService creates a full node instance for testing. diff --git a/grpc/shared/validation.go b/grpc/shared/validation.go index 6d00f9f32..b6cd82839 100644 --- a/grpc/shared/validation.go +++ b/grpc/shared/validation.go @@ -113,7 +113,7 @@ func validateAndUnmarshallSequenceAction(tx *sequencerblockv1.RollupData) (*type return ethTx, nil } -func unmarshallAllocationTxs(allocation *bundlev1alpha1.Allocation, prevBlockHash []byte, trustedBuilderBech32Address string, addressPrefix string) (types.Transactions, error) { +func unmarshallAllocationTxs(allocation *bundlev1alpha1.Allocation, prevBlockHash []byte, auctioneerBech32Address string, addressPrefix string) (types.Transactions, error) { processedTxs := types.Transactions{} payload := allocation.GetPayload() @@ -126,8 +126,8 @@ func unmarshallAllocationTxs(allocation *bundlev1alpha1.Allocation, prevBlockHas if err != nil { return nil, errors.Wrapf(err, "failed to encode public key to bech32m address: %s", publicKey) } - if trustedBuilderBech32Address != bech32Address { - return nil, errors.Errorf("public key in allocation does not match trusted builder public key. expected: %s, got: %s", trustedBuilderBech32Address, bech32Address) + if auctioneerBech32Address != bech32Address { + return nil, errors.Errorf("address in allocation does not match auctioneer address. expected: %s, got: %s", auctioneerBech32Address, bech32Address) } message, err := proto.Marshal(allocation.GetPayload()) @@ -157,7 +157,7 @@ func unmarshallAllocationTxs(allocation *bundlev1alpha1.Allocation, prevBlockHas // `UnbundleRollupDataTransactions` takes in a list of rollup data transactions and returns a list of Ethereum transactions. // TODO - this function has become too big. 
we should start breaking it down func UnbundleRollupDataTransactions(txs []*sequencerblockv1.RollupData, height uint64, bridgeAddresses map[string]*params.AstriaBridgeAddressConfig, - bridgeAllowedAssets map[string]struct{}, prevBlockHash []byte, trustedBuilderBech32Address string, addressPrefix string) types.Transactions { + bridgeAllowedAssets map[string]struct{}, prevBlockHash []byte, auctioneerBech32Address string, addressPrefix string) types.Transactions { processedTxs := types.Transactions{} allocationTxs := types.Transactions{} // we just return the allocation here and do not unmarshall the transactions in the bundle if we find it @@ -179,7 +179,7 @@ func UnbundleRollupDataTransactions(txs []*sequencerblockv1.RollupData, height u tempAllocation := &bundlev1alpha1.Allocation{} err := proto.Unmarshal(sequenceData, tempAllocation) if err == nil { - unmarshalledAllocationTxs, err := unmarshallAllocationTxs(tempAllocation, prevBlockHash, trustedBuilderBech32Address, addressPrefix) + unmarshalledAllocationTxs, err := unmarshallAllocationTxs(tempAllocation, prevBlockHash, auctioneerBech32Address, addressPrefix) if err != nil { log.Error("failed to unmarshall allocation transactions", "error", err) continue diff --git a/grpc/shared/validation_test.go b/grpc/shared/validation_test.go index 734fcc249..57c404366 100644 --- a/grpc/shared/validation_test.go +++ b/grpc/shared/validation_test.go @@ -73,7 +73,7 @@ func generateBech32MAddress() string { } func TestUnmarshallAllocationTxs(t *testing.T) { - ethService, serviceV1Alpha1, trustedBuilderPrivKey, trustedBuilderPubKey := SetupSharedService(t, 10) + ethService, serviceV1Alpha1, auctioneerPrivKey, auctioneerPubKey := SetupSharedService(t, 10) addressPrefix := ethService.BlockChain().Config().AstriaSequencerAddressPrefix tx1 := transaction(0, 1000, TestKey) @@ -98,7 +98,7 @@ func TestUnmarshallAllocationTxs(t *testing.T) { marshalledAllocation, err := proto.Marshal(validPayload) require.NoError(t, err, "failed to marshal payload: %v", err) - signedAllocation, err := trustedBuilderPrivKey.Sign(nil, marshalledAllocation, &ed25519.Options{ + signedAllocation, err := auctioneerPrivKey.Sign(nil, marshalledAllocation, &ed25519.Options{ Hash: 0, Context: "", }) @@ -143,13 +143,13 @@ func TestUnmarshallAllocationTxs(t *testing.T) { }, prevBlockHash: []byte("prev rollup block hash"), expectedOutput: types.Transactions{}, - wantErr: "public key in allocation does not match trusted builder public key", + wantErr: "address in allocation does not match auctioneer address", }, { description: "invalid signature", allocation: &bundlev1alpha1.Allocation{ Signature: []byte("invalid signature"), - PublicKey: trustedBuilderPubKey, + PublicKey: auctioneerPubKey, Payload: &bundlev1alpha1.Bundle{ Fee: 100, Transactions: [][]byte{[]byte("unmarshallable tx")}, @@ -165,7 +165,7 @@ func TestUnmarshallAllocationTxs(t *testing.T) { description: "valid allocation", allocation: &bundlev1alpha1.Allocation{ Signature: signedAllocation, - PublicKey: trustedBuilderPubKey, + PublicKey: auctioneerPubKey, Payload: validPayload, }, prevBlockHash: []byte("prev rollup block hash"), @@ -176,7 +176,7 @@ func TestUnmarshallAllocationTxs(t *testing.T) { for _, test := range tests { t.Run(test.description, func(t *testing.T) { - finalTxs, err := unmarshallAllocationTxs(test.allocation, test.prevBlockHash, serviceV1Alpha1.TrustedBuilderPublicKey(), addressPrefix) + finalTxs, err := unmarshallAllocationTxs(test.allocation, test.prevBlockHash, serviceV1Alpha1.AuctioneerAddress(), 
addressPrefix) if test.wantErr == "" && err == nil { for _, tx := range test.expectedOutput { foundTx := false @@ -369,7 +369,7 @@ func TestValidateAndUnmarshallSequenceAction(t *testing.T) { } func TestUnbundleRollupData(t *testing.T) { - ethservice, serviceV1Alpha1, trustedBuilderPrivKey, trustedBuilderPubKey := SetupSharedService(t, 10) + ethservice, serviceV1Alpha1, auctioneerPrivKey, auctioneerPubKey := SetupSharedService(t, 10) addressPrefix := ethservice.BlockChain().Config().AstriaSequencerAddressPrefix @@ -403,7 +403,7 @@ func TestUnbundleRollupData(t *testing.T) { marshalledPayload, err := proto.Marshal(payload) require.NoError(t, err, "failed to marshal payload: %v", err) - signedPayload, err := trustedBuilderPrivKey.Sign(nil, marshalledPayload, &ed25519.Options{ + signedPayload, err := auctioneerPrivKey.Sign(nil, marshalledPayload, &ed25519.Options{ Hash: 0, Context: "", }) @@ -411,7 +411,7 @@ func TestUnbundleRollupData(t *testing.T) { allocation := &bundlev1alpha1.Allocation{ Signature: signedPayload, - PublicKey: trustedBuilderPubKey, + PublicKey: auctioneerPubKey, Payload: payload, } @@ -455,7 +455,7 @@ func TestUnbundleRollupData(t *testing.T) { finalTxs := []*sequencerblockv1.RollupData{seqData1, seqData2, allocationSequenceData, depositTx} - txsToProcess := UnbundleRollupDataTransactions(finalTxs, 2, serviceV1Alpha1.BridgeAddresses(), serviceV1Alpha1.BridgeAllowedAssets(), prevRollupBlockHash, serviceV1Alpha1.TrustedBuilderPublicKey(), addressPrefix) + txsToProcess := UnbundleRollupDataTransactions(finalTxs, 2, serviceV1Alpha1.BridgeAddresses(), serviceV1Alpha1.BridgeAllowedAssets(), prevRollupBlockHash, serviceV1Alpha1.AuctioneerAddress(), addressPrefix) require.Equal(t, txsToProcess.Len(), 6, "expected 6 txs to process") @@ -468,7 +468,7 @@ func TestUnbundleRollupData(t *testing.T) { } func TestUnbundleRollupDataWithDuplicateAllocations(t *testing.T) { - ethservice, serviceV1Alpha1, trustedBuilderPrivKey, trustedBuilderPubKey := SetupSharedService(t, 10) + ethservice, serviceV1Alpha1, auctioneerPrivKey, auctioneerPubKey := SetupSharedService(t, 10) addressPrefix := ethservice.BlockChain().Config().AstriaSequencerAddressPrefix baseSequencerBlockHash := []byte("sequencer block hash") @@ -501,7 +501,7 @@ func TestUnbundleRollupDataWithDuplicateAllocations(t *testing.T) { marshalledPayload, err := proto.Marshal(payload) require.NoError(t, err, "failed to marshal payload: %v", err) - signedPayload, err := trustedBuilderPrivKey.Sign(nil, marshalledPayload, &ed25519.Options{ + signedPayload, err := auctioneerPrivKey.Sign(nil, marshalledPayload, &ed25519.Options{ Hash: 0, Context: "", }) @@ -509,7 +509,7 @@ func TestUnbundleRollupDataWithDuplicateAllocations(t *testing.T) { allocation := &bundlev1alpha1.Allocation{ Signature: signedPayload, - PublicKey: trustedBuilderPubKey, + PublicKey: auctioneerPubKey, Payload: payload, } @@ -559,7 +559,7 @@ func TestUnbundleRollupDataWithDuplicateAllocations(t *testing.T) { finalTxs := []*sequencerblockv1.RollupData{seqData1, seqData2, allocationSequenceData, allocationSequenceData2, depositTx} - txsToProcess := UnbundleRollupDataTransactions(finalTxs, 2, serviceV1Alpha1.BridgeAddresses(), serviceV1Alpha1.BridgeAllowedAssets(), prevRollupBlockHash, serviceV1Alpha1.TrustedBuilderPublicKey(), addressPrefix) + txsToProcess := UnbundleRollupDataTransactions(finalTxs, 2, serviceV1Alpha1.BridgeAddresses(), serviceV1Alpha1.BridgeAllowedAssets(), prevRollupBlockHash, serviceV1Alpha1.AuctioneerAddress(), addressPrefix) require.Equal(t, 
txsToProcess.Len(), 6, "expected 6 txs to process") @@ -572,14 +572,14 @@ func TestUnbundleRollupDataWithDuplicateAllocations(t *testing.T) { } func TestUnbundleRollupDataWithDuplicateInvalidAllocations(t *testing.T) { - ethservice, serviceV1Alpha1, trustedBuilderPrivKey, trustedBuilderPubKey := SetupSharedService(t, 10) + ethservice, serviceV1Alpha1, auctioneerPrivKey, auctioneerPubKey := SetupSharedService(t, 10) addressPrefix := ethservice.BlockChain().Config().AstriaSequencerAddressPrefix baseSequencerBlockHash := []byte("sequencer block hash") prevRollupBlockHash := []byte("prev rollup block hash") - _, invalidTrustedBuilderprivkey, err := ed25519.GenerateKey(nil) - require.Nil(t, err, "failed to generate invalid trusted builder key: %v", err) + _, invalidAuctioneerprivkey, err := ed25519.GenerateKey(nil) + require.Nil(t, err, "failed to generate invalid auctioneer key: %v", err) // txs in tx1 := transaction(0, 1000, TestKey) @@ -616,7 +616,7 @@ func TestUnbundleRollupDataWithDuplicateInvalidAllocations(t *testing.T) { marshalledPayload, err := proto.Marshal(payload) require.NoError(t, err, "failed to marshal allocation: %v", err) - signedPayload, err := trustedBuilderPrivKey.Sign(nil, marshalledPayload, &ed25519.Options{ + signedPayload, err := auctioneerPrivKey.Sign(nil, marshalledPayload, &ed25519.Options{ Hash: 0, Context: "", }) @@ -631,7 +631,7 @@ func TestUnbundleRollupDataWithDuplicateInvalidAllocations(t *testing.T) { marshalledInvalidPayload, err := proto.Marshal(invalidPayload) require.NoError(t, err, "failed to marshal invalid allocation: %v", err) - signedInvalidPayload, err := invalidTrustedBuilderprivkey.Sign(nil, marshalledInvalidPayload, &ed25519.Options{ + signedInvalidPayload, err := invalidAuctioneerprivkey.Sign(nil, marshalledInvalidPayload, &ed25519.Options{ Hash: 0, Context: "", }) @@ -639,7 +639,7 @@ func TestUnbundleRollupDataWithDuplicateInvalidAllocations(t *testing.T) { allocation := &bundlev1alpha1.Allocation{ Signature: signedPayload, - PublicKey: trustedBuilderPubKey, + PublicKey: auctioneerPubKey, Payload: payload, } @@ -648,8 +648,8 @@ func TestUnbundleRollupDataWithDuplicateInvalidAllocations(t *testing.T) { invalidAllocation := &bundlev1alpha1.Allocation{ Signature: signedInvalidPayload, - // trying to spoof the actual trusted builder key - PublicKey: trustedBuilderPubKey, + // trying to spoof the actual auctioneer key + PublicKey: auctioneerPubKey, Payload: invalidPayload, } marshalledInvalidAllocation, err := proto.Marshal(invalidAllocation) @@ -699,7 +699,7 @@ func TestUnbundleRollupDataWithDuplicateInvalidAllocations(t *testing.T) { finalTxs := []*sequencerblockv1.RollupData{seqData1, seqData2, allocationSequenceData, invalidAllocationSequenceData, depositTx} - txsToProcess := UnbundleRollupDataTransactions(finalTxs, 2, serviceV1Alpha1.BridgeAddresses(), serviceV1Alpha1.BridgeAllowedAssets(), prevRollupBlockHash, serviceV1Alpha1.TrustedBuilderPublicKey(), addressPrefix) + txsToProcess := UnbundleRollupDataTransactions(finalTxs, 2, serviceV1Alpha1.BridgeAddresses(), serviceV1Alpha1.BridgeAllowedAssets(), prevRollupBlockHash, serviceV1Alpha1.AuctioneerAddress(), addressPrefix) require.Equal(t, txsToProcess.Len(), 6, "expected 6 txs to process") diff --git a/params/config.go b/params/config.go index 5783ef162..94a960268 100644 --- a/params/config.go +++ b/params/config.go @@ -388,7 +388,7 @@ type ChainConfig struct { AstriaBridgeAddressConfigs []AstriaBridgeAddressConfig `json:"astriaBridgeAddresses,omitempty"` AstriaFeeCollectors 
map[uint32]common.Address `json:"astriaFeeCollectors"` AstriaEIP1559Params *AstriaEIP1559Params `json:"astriaEIP1559Params,omitempty"` - AstriaTrustedBuilderAddresses map[uint32]string `json:"astriaTrustedBuilderAddresses,omitempty"` + AstriaAuctioneerAddresses map[uint32]string `json:"astriaAuctionerAddresses,omitempty"` } func (c *ChainConfig) AstriaExtraData() []byte { From a82a00e4a24c1f9fc3b9dd838c56a5cce2727efe Mon Sep 17 00:00:00 2001 From: Bharath Date: Thu, 21 Nov 2024 15:37:58 +0530 Subject: [PATCH 63/79] fix typo --- params/config.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/params/config.go b/params/config.go index 94a960268..7d9ee2a79 100644 --- a/params/config.go +++ b/params/config.go @@ -388,7 +388,7 @@ type ChainConfig struct { AstriaBridgeAddressConfigs []AstriaBridgeAddressConfig `json:"astriaBridgeAddresses,omitempty"` AstriaFeeCollectors map[uint32]common.Address `json:"astriaFeeCollectors"` AstriaEIP1559Params *AstriaEIP1559Params `json:"astriaEIP1559Params,omitempty"` - AstriaAuctioneerAddresses map[uint32]string `json:"astriaAuctionerAddresses,omitempty"` + AstriaAuctioneerAddresses map[uint32]string `json:"astriaAuctioneerAddresses,omitempty"` } func (c *ChainConfig) AstriaExtraData() []byte { From 9809d0c7ae8898324df135f456977767056402cc Mon Sep 17 00:00:00 2001 From: Bharath Date: Wed, 20 Nov 2024 16:39:20 +0530 Subject: [PATCH 64/79] add an api to query the optimistic block --- eth/api_backend.go | 7 +++++++ grpc/execution/server.go | 2 +- internal/ethapi/api.go | 1 + rpc/types.go | 11 ++++++----- 4 files changed, 15 insertions(+), 6 deletions(-) diff --git a/eth/api_backend.go b/eth/api_backend.go index 304904365..2b5c820c4 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -143,6 +143,13 @@ func (b *EthAPIBackend) BlockByNumber(ctx context.Context, number rpc.BlockNumbe } return b.eth.blockchain.GetBlock(header.Hash(), header.Number.Uint64()), nil } + if number == rpc.OptimisticBlockNumber { + header := b.eth.blockchain.CurrentOptimisticBlock() + if header == nil { + return nil, errors.New("optimistic block not found") + } + return b.eth.blockchain.GetBlock(header.Hash(), header.Number.Uint64()), nil + } return b.eth.blockchain.GetBlockByNumber(uint64(number)), nil } diff --git a/grpc/execution/server.go b/grpc/execution/server.go index 49f12c7b4..c0c15fb8a 100644 --- a/grpc/execution/server.go +++ b/grpc/execution/server.go @@ -181,7 +181,7 @@ func (s *ExecutionServiceServerV1) ExecuteBlock(ctx context.Context, req *astria // Build a payload to add to the chain payloadAttributes := &miner.BuildPayloadArgs{ Parent: prevHeadHash, - Timestamp: uint64(req.GetTimestamp().GetSeconds()), + Timestamp: uint64(req.GetTimestamp().AsTime().UnixNano()), Random: common.Hash{}, FeeRecipient: s.NextFeeRecipient(), OverrideTransactions: types.Transactions{}, diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index ffa20fb62..7086f8156 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -834,6 +834,7 @@ func (s *BlockChainAPI) GetHeaderByHash(ctx context.Context, hash common.Hash) m // - When blockNr is -2 the chain latest block is returned. // - When blockNr is -3 the chain finalized block is returned. // - When blockNr is -4 the chain safe block is returned. +// - When blockNr is -5 the chain optimistic block is returned. // - When fullTx is true all transactions in the block are returned, otherwise // only the transaction hash is returned. 
func (s *BlockChainAPI) GetBlockByNumber(ctx context.Context, number rpc.BlockNumber, fullTx bool) (map[string]interface{}, error) { diff --git a/rpc/types.go b/rpc/types.go index 2e53174b8..5f2ae24e5 100644 --- a/rpc/types.go +++ b/rpc/types.go @@ -63,11 +63,12 @@ type jsonWriter interface { type BlockNumber int64 const ( - SafeBlockNumber = BlockNumber(-4) - FinalizedBlockNumber = BlockNumber(-3) - LatestBlockNumber = BlockNumber(-2) - PendingBlockNumber = BlockNumber(-1) - EarliestBlockNumber = BlockNumber(0) + OptimisticBlockNumber = BlockNumber(-5) + SafeBlockNumber = BlockNumber(-4) + FinalizedBlockNumber = BlockNumber(-3) + LatestBlockNumber = BlockNumber(-2) + PendingBlockNumber = BlockNumber(-1) + EarliestBlockNumber = BlockNumber(0) ) // UnmarshalJSON parses the given JSON fragment into a BlockNumber. It supports: From e6f134581445602a6ed8632481ff7aa2c6ddf253 Mon Sep 17 00:00:00 2001 From: Bharath Date: Thu, 21 Nov 2024 15:12:26 +0530 Subject: [PATCH 65/79] support querying optimistic block using the optimistic string --- grpc/execution/server.go | 4 ++-- grpc/optimistic/server.go | 4 ++-- rpc/types.go | 9 +++++++++ 3 files changed, 13 insertions(+), 4 deletions(-) diff --git a/grpc/execution/server.go b/grpc/execution/server.go index c0c15fb8a..a984e4ffc 100644 --- a/grpc/execution/server.go +++ b/grpc/execution/server.go @@ -181,7 +181,7 @@ func (s *ExecutionServiceServerV1) ExecuteBlock(ctx context.Context, req *astria // Build a payload to add to the chain payloadAttributes := &miner.BuildPayloadArgs{ Parent: prevHeadHash, - Timestamp: uint64(req.GetTimestamp().AsTime().UnixNano()), + Timestamp: uint64(req.GetTimestamp().GetSeconds()), Random: common.Hash{}, FeeRecipient: s.NextFeeRecipient(), OverrideTransactions: types.Transactions{}, @@ -190,7 +190,7 @@ func (s *ExecutionServiceServerV1) ExecuteBlock(ctx context.Context, req *astria payload, err := s.Eth().Miner().BuildPayload(payloadAttributes) if err != nil { log.Error("failed to build payload", "err", err) - return nil, status.Error(codes.InvalidArgument, "Could not build block with provided txs") + return nil, status.Errorf(codes.InvalidArgument, "Could not build block with provided txs: %v", err) } // call blockchain.InsertChain to actually execute and write the blocks to diff --git a/grpc/optimistic/server.go b/grpc/optimistic/server.go index 20202fecc..0c20187d7 100644 --- a/grpc/optimistic/server.go +++ b/grpc/optimistic/server.go @@ -120,7 +120,7 @@ func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlockStream(stream optimist // execute the optimistic block and wait for the mempool clearing event optimisticBlock, err := o.ExecuteOptimisticBlock(stream.Context(), baseBlock) if err != nil { - return status.Error(codes.Internal, "failed to execute optimistic block") + return status.Errorf(codes.Internal, "failed to execute optimistic block: %v", err) } optimisticBlockHash := common.BytesToHash(optimisticBlock.Hash) @@ -186,7 +186,7 @@ func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlock(ctx context.Context, payload, err := o.Eth().Miner().BuildPayload(payloadAttributes) if err != nil { log.Error("failed to build payload", "err", err) - return nil, status.Error(codes.InvalidArgument, "Could not build block with provided txs") + return nil, status.Errorf(codes.InvalidArgument, "Could not build block with provided txs: %v", err) } block, err := engine.ExecutableDataToBlock(*payload.Resolve().ExecutionPayload, nil, nil) diff --git a/rpc/types.go b/rpc/types.go index 5f2ae24e5..249efc51a 100644 --- a/rpc/types.go 
+++ b/rpc/types.go @@ -99,6 +99,9 @@ func (bn *BlockNumber) UnmarshalJSON(data []byte) error { case "safe": *bn = SafeBlockNumber return nil + case "optimistic": + *bn = OptimisticBlockNumber + return nil } blckNum, err := hexutil.DecodeUint64(input) @@ -136,6 +139,8 @@ func (bn BlockNumber) String() string { return "finalized" case SafeBlockNumber: return "safe" + case OptimisticBlockNumber: + return "optimistic" default: if bn < 0 { return fmt.Sprintf("", bn) @@ -189,6 +194,10 @@ func (bnh *BlockNumberOrHash) UnmarshalJSON(data []byte) error { bn := SafeBlockNumber bnh.BlockNumber = &bn return nil + case "optimistic": + bn := OptimisticBlockNumber + bnh.BlockNumber = &bn + return nil default: if len(input) == 66 { hash := common.Hash{} From 2561dc3594f01dc88584f00ed0ae90efd5f6aef4 Mon Sep 17 00:00:00 2001 From: Bharath Date: Wed, 27 Nov 2024 14:22:45 +0530 Subject: [PATCH 66/79] close the stream when we get the done signal --- grpc/optimistic/server.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/grpc/optimistic/server.go b/grpc/optimistic/server.go index 0c20187d7..72fd9e35b 100644 --- a/grpc/optimistic/server.go +++ b/grpc/optimistic/server.go @@ -139,6 +139,8 @@ func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlockStream(stream optimist return status.Error(codes.DeadlineExceeded, "timed out waiting for mempool to clear after optimistic block execution") case err := <-mempoolClearingEvent.Err(): return status.Errorf(codes.Internal, "error waiting for mempool clearing event: %v", err) + case err := <-stream.Context().Done(): + return status.Errorf(codes.Internal, "stream closed with error: %v", err) } } } From df8ead5686eda4fefe30c01f8fa2168a8bf205c4 Mon Sep 17 00:00:00 2001 From: Bharath Date: Tue, 3 Dec 2024 13:09:50 +0530 Subject: [PATCH 67/79] add debug logs for when the stream rpcs are called --- grpc/optimistic/server.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/grpc/optimistic/server.go b/grpc/optimistic/server.go index 72fd9e35b..59ef288f6 100644 --- a/grpc/optimistic/server.go +++ b/grpc/optimistic/server.go @@ -54,6 +54,8 @@ func NewOptimisticServiceV1Alpha(sharedServiceContainer *shared.SharedServiceCon } func (o *OptimisticServiceV1Alpha1) GetBundleStream(_ *optimsticPb.GetBundleStreamRequest, stream optimisticGrpc.BundleService_GetBundleStreamServer) error { + log.Debug("GetBundleStream called") + pendingTxEventCh := make(chan core.NewTxsEvent) pendingTxEvent := o.Eth().TxPool().SubscribeTransactions(pendingTxEventCh, false) defer pendingTxEvent.Unsubscribe() @@ -101,6 +103,8 @@ func (o *OptimisticServiceV1Alpha1) GetBundleStream(_ *optimsticPb.GetBundleStre } func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlockStream(stream optimisticGrpc.OptimisticExecutionService_ExecuteOptimisticBlockStreamServer) error { + log.Debug("ExecuteOptimisticBlockStream called") + mempoolClearingEventCh := make(chan core.NewMempoolCleared) mempoolClearingEvent := o.Eth().TxPool().SubscribeMempoolClearance(mempoolClearingEventCh) defer mempoolClearingEvent.Unsubscribe() From 7aed2a08ff46f25763115fc6c50bf0a0c98e9307 Mon Sep 17 00:00:00 2001 From: Bharath Date: Wed, 27 Nov 2024 14:58:34 +0530 Subject: [PATCH 68/79] add some logs --- grpc/optimistic/server.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/grpc/optimistic/server.go b/grpc/optimistic/server.go index 59ef288f6..0d10b0a91 100644 --- a/grpc/optimistic/server.go +++ b/grpc/optimistic/server.go @@ -140,10 +140,13 @@ func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlockStream(stream optimist 
BaseSequencerBlockHash: baseBlock.SequencerBlockHash, }) case <-time.After(500 * time.Millisecond): + log.Error("timed out waiting for mempool to clear after optimistic block execution") return status.Error(codes.DeadlineExceeded, "timed out waiting for mempool to clear after optimistic block execution") case err := <-mempoolClearingEvent.Err(): + log.Error("error waiting for mempool clearing event", "err", err) return status.Errorf(codes.Internal, "error waiting for mempool clearing event: %v", err) case err := <-stream.Context().Done(): + log.Error("ExecuteOptimisticBlockStream stream closed with error", "err", err) return status.Errorf(codes.Internal, "stream closed with error: %v", err) } } From 634e9620219a9e52a6394f5b5a0c128cf6d0105e Mon Sep 17 00:00:00 2001 From: Bharath Date: Tue, 3 Dec 2024 13:48:42 +0530 Subject: [PATCH 69/79] add some logs --- grpc/optimistic/server.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/grpc/optimistic/server.go b/grpc/optimistic/server.go index 0d10b0a91..2c7027c92 100644 --- a/grpc/optimistic/server.go +++ b/grpc/optimistic/server.go @@ -88,15 +88,16 @@ func (o *OptimisticServiceV1Alpha1) GetBundleStream(_ *optimsticPb.GetBundleStre err = stream.Send(&optimsticPb.GetBundleStreamResponse{Bundle: &bundle}) if err != nil { + log.Error("error sending bundle over stream", "err", err) return status.Errorf(codes.Internal, "error sending bundle over stream: %v", err) } } case err := <-pendingTxEvent.Err(): + log.Error("error waiting for pending transactions", "err", err) return status.Errorf(codes.Internal, "error waiting for pending transactions: %v", err) case <-stream.Context().Done(): - log.Debug("GetBundleStream stream closed with error", "err", stream.Context().Err()) return stream.Context().Err() } } @@ -145,9 +146,8 @@ func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlockStream(stream optimist case err := <-mempoolClearingEvent.Err(): log.Error("error waiting for mempool clearing event", "err", err) return status.Errorf(codes.Internal, "error waiting for mempool clearing event: %v", err) - case err := <-stream.Context().Done(): - log.Error("ExecuteOptimisticBlockStream stream closed with error", "err", err) - return status.Errorf(codes.Internal, "stream closed with error: %v", err) + case <-stream.Context().Done(): + return stream.Context().Err() } } } From e5d90d2c88a83f65baa5809b97e6034574fef005 Mon Sep 17 00:00:00 2001 From: Bharath Date: Tue, 3 Dec 2024 15:11:41 +0530 Subject: [PATCH 70/79] use atomic pointer for fee recipient --- grpc/shared/container.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/grpc/shared/container.go b/grpc/shared/container.go index bf5e23b78..263971c5e 100644 --- a/grpc/shared/container.go +++ b/grpc/shared/container.go @@ -28,8 +28,7 @@ type SharedServiceContainer struct { // auctioneer address is a bech32m address auctioneerAddress atomic.Pointer[string] - // TODO: bharath - we could make this an atomic pointer??? 
- nextFeeRecipient common.Address // Fee recipient for the next block + nextFeeRecipient atomic.Pointer[common.Address] // Fee recipient for the next block } func NewSharedServiceContainer(eth *eth.Ethereum) (*SharedServiceContainer, error) { @@ -123,10 +122,10 @@ func NewSharedServiceContainer(eth *eth.Ethereum) (*SharedServiceContainer, erro bc: bc, bridgeAddresses: bridgeAddresses, bridgeAllowedAssets: bridgeAllowedAssets, - nextFeeRecipient: nextFeeRecipient, } sharedServiceContainer.SetAuctioneerAddress(auctioneerAddress) + sharedServiceContainer.SetNextFeeRecipient(nextFeeRecipient) return sharedServiceContainer, nil } @@ -168,12 +167,12 @@ func (s *SharedServiceContainer) BlockExecutionLock() *sync.Mutex { } func (s *SharedServiceContainer) NextFeeRecipient() common.Address { - return s.nextFeeRecipient + return *s.nextFeeRecipient.Load() } // assumes that the block execution lock is being held func (s *SharedServiceContainer) SetNextFeeRecipient(nextFeeRecipient common.Address) { - s.nextFeeRecipient = nextFeeRecipient + s.nextFeeRecipient.Store(&nextFeeRecipient) } func (s *SharedServiceContainer) BridgeAddresses() map[string]*params.AstriaBridgeAddressConfig { From 9e7abe712a50c7456176722457bb71cf5db87cc7 Mon Sep 17 00:00:00 2001 From: Bharath Date: Tue, 3 Dec 2024 15:16:02 +0530 Subject: [PATCH 71/79] remove unnecessary lock --- grpc/optimistic/server.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/grpc/optimistic/server.go b/grpc/optimistic/server.go index 2c7027c92..08da038d5 100644 --- a/grpc/optimistic/server.go +++ b/grpc/optimistic/server.go @@ -172,9 +172,7 @@ func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlock(ctx context.Context, softBlock := o.Bc().CurrentSafeBlock() - o.BlockExecutionLock().Lock() nextFeeRecipient := o.NextFeeRecipient() - o.BlockExecutionLock().Unlock() // the height that this block will be at height := o.Bc().CurrentBlock().Number.Uint64() + 1 @@ -214,6 +212,8 @@ func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlock(ctx context.Context, // we store a pointer to the optimistic block in the chain so that we can use it // to retrieve the state of the optimistic block + // this method also sends an event which indicates that a new optimistic block has been set + // the mempool clearing logic is triggered when this event is received o.Bc().SetOptimistic(block) res := &astriaPb.Block{ From f7d558463eca22f69b0919664e286aca430af210 Mon Sep 17 00:00:00 2001 From: Bharath Date: Tue, 3 Dec 2024 15:53:26 +0530 Subject: [PATCH 72/79] wrap errors --- grpc/execution/server.go | 18 +++++++++--------- grpc/optimistic/server.go | 33 +++++++++++++++++++++------------ grpc/optimistic/server_test.go | 2 +- grpc/shared/validation.go | 16 ++++++++++------ 4 files changed, 41 insertions(+), 28 deletions(-) diff --git a/grpc/execution/server.go b/grpc/execution/server.go index a984e4ffc..1bb17cb1e 100644 --- a/grpc/execution/server.go +++ b/grpc/execution/server.go @@ -100,7 +100,7 @@ func (s *ExecutionServiceServerV1) GetBlock(ctx context.Context, req *astriaPb.G res, err := s.getBlockFromIdentifier(req.GetIdentifier()) if err != nil { log.Error("failed finding block", err) - return nil, err + return nil, shared.WrapError(err, "failed finding block") } log.Debug("GetBlock completed", "request", req, "response", res) @@ -125,7 +125,7 @@ func (s *ExecutionServiceServerV1) BatchGetBlocks(ctx context.Context, req *astr block, err := s.getBlockFromIdentifier(id) if err != nil { log.Error("failed finding block with id", id, "error", err) - 
return nil, err + return nil, shared.WrapError(err, fmt.Sprintf("failed finding block with id %s", id.String())) } blocks = append(blocks, block) @@ -190,7 +190,7 @@ func (s *ExecutionServiceServerV1) ExecuteBlock(ctx context.Context, req *astria payload, err := s.Eth().Miner().BuildPayload(payloadAttributes) if err != nil { log.Error("failed to build payload", "err", err) - return nil, status.Errorf(codes.InvalidArgument, "Could not build block with provided txs: %v", err) + return nil, status.Errorf(codes.InvalidArgument, shared.WrapError(err, "Could not build block with provided txs").Error()) } // call blockchain.InsertChain to actually execute and write the blocks to @@ -198,12 +198,12 @@ func (s *ExecutionServiceServerV1) ExecuteBlock(ctx context.Context, req *astria block, err := engine.ExecutableDataToBlock(*payload.Resolve().ExecutionPayload, nil, nil) if err != nil { log.Error("failed to convert executable data to block", err) - return nil, status.Error(codes.Internal, "failed to execute block") + return nil, status.Error(codes.Internal, shared.WrapError(err, "failed to convert executable data to block").Error()) } err = s.Bc().InsertBlockWithoutSetHead(block) if err != nil { log.Error("failed to insert block to chain", "hash", block.Hash(), "prevHash", req.PrevBlockHash, "err", err) - return nil, status.Error(codes.Internal, "failed to insert block to chain") + return nil, status.Error(codes.Internal, shared.WrapError(err, "failed to insert block to chain").Error()) } // remove txs from original mempool @@ -244,12 +244,12 @@ func (s *ExecutionServiceServerV1) GetCommitmentState(ctx context.Context, req * softBlock, err := ethHeaderToExecutionBlock(s.Bc().CurrentSafeBlock()) if err != nil { log.Error("error finding safe block", err) - return nil, status.Error(codes.Internal, "could not locate soft block") + return nil, status.Error(codes.Internal, shared.WrapError(err, "could not locate soft block").Error()) } firmBlock, err := ethHeaderToExecutionBlock(s.Bc().CurrentFinalBlock()) if err != nil { log.Error("error finding final block", err) - return nil, status.Error(codes.Internal, "could not locate firm block") + return nil, status.Error(codes.Internal, shared.WrapError(err, "could not locate firm block").Error()) } celestiaBlock := s.Bc().CurrentBaseCelestiaHeight() @@ -312,7 +312,7 @@ func (s *ExecutionServiceServerV1) UpdateCommitmentState(ctx context.Context, re if currentHead != softEthHash { if _, err := s.Bc().SetCanonical(softBlock); err != nil { log.Error("failed updating canonical chain to soft block", err) - return nil, status.Error(codes.Internal, "Could not update head to safe hash") + return nil, status.Error(codes.Internal, shared.WrapError(err, "Could not update head to safe hash").Error()) } } @@ -368,7 +368,7 @@ func (s *ExecutionServiceServerV1) getBlockFromIdentifier(identifier *astriaPb.B res, err := ethHeaderToExecutionBlock(header) if err != nil { // This should never happen since we validate header exists above. 
- return nil, status.Error(codes.Internal, "internal error") + return nil, status.Error(codes.Internal, shared.WrapError(err, "internal error").Error()) } return res, nil diff --git a/grpc/optimistic/server.go b/grpc/optimistic/server.go index 08da038d5..a56c0d8ac 100644 --- a/grpc/optimistic/server.go +++ b/grpc/optimistic/server.go @@ -6,7 +6,6 @@ import ( astriaPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/execution/v1" "context" "errors" - "fmt" "github.com/ethereum/go-ethereum/beacon/engine" "github.com/ethereum/go-ethereum/common" cmath "github.com/ethereum/go-ethereum/common/math" @@ -77,7 +76,7 @@ func (o *OptimisticServiceV1Alpha1) GetBundleStream(_ *optimsticPb.GetBundleStre marshalledTxs := [][]byte{} marshalledTx, err := pendingTx.MarshalBinary() if err != nil { - return status.Errorf(codes.Internal, "error marshalling tx: %v", err) + return status.Errorf(codes.Internal, shared.WrapError(err, "error marshalling tx").Error()) } marshalledTxs = append(marshalledTxs, marshalledTx) @@ -89,13 +88,18 @@ func (o *OptimisticServiceV1Alpha1) GetBundleStream(_ *optimsticPb.GetBundleStre err = stream.Send(&optimsticPb.GetBundleStreamResponse{Bundle: &bundle}) if err != nil { log.Error("error sending bundle over stream", "err", err) - return status.Errorf(codes.Internal, "error sending bundle over stream: %v", err) + return status.Error(codes.Internal, shared.WrapError(err, "error sending bundle over stream").Error()) } } case err := <-pendingTxEvent.Err(): - log.Error("error waiting for pending transactions", "err", err) - return status.Errorf(codes.Internal, "error waiting for pending transactions: %v", err) + if err != nil { + log.Error("error waiting for pending transactions", "err", err) + return status.Error(codes.Internal, shared.WrapError(err, "error waiting for pending transactions").Error()) + } else { + // TODO - what is the right error code here? + return status.Error(codes.Internal, "tx pool subscription closed") + } case <-stream.Context().Done(): return stream.Context().Err() @@ -125,7 +129,7 @@ func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlockStream(stream optimist // execute the optimistic block and wait for the mempool clearing event optimisticBlock, err := o.ExecuteOptimisticBlock(stream.Context(), baseBlock) if err != nil { - return status.Errorf(codes.Internal, "failed to execute optimistic block: %v", err) + return status.Errorf(codes.Internal, shared.WrapError(err, "failed to execute optimistic block").Error()) } optimisticBlockHash := common.BytesToHash(optimisticBlock.Hash) @@ -144,8 +148,13 @@ func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlockStream(stream optimist log.Error("timed out waiting for mempool to clear after optimistic block execution") return status.Error(codes.DeadlineExceeded, "timed out waiting for mempool to clear after optimistic block execution") case err := <-mempoolClearingEvent.Err(): - log.Error("error waiting for mempool clearing event", "err", err) - return status.Errorf(codes.Internal, "error waiting for mempool clearing event: %v", err) + if err != nil { + log.Error("error waiting for mempool clearing event", "err", err) + return status.Errorf(codes.Internal, shared.WrapError(err, "error waiting for mempool clearing event").Error()) + } else { + // TODO - what is the right error code here? 
+ return status.Error(codes.Internal, "mempool clearance subscription closed") + } case <-stream.Context().Done(): return stream.Context().Err() } @@ -159,7 +168,7 @@ func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlock(ctx context.Context, if err := validateStaticExecuteOptimisticBlockRequest(req); err != nil { log.Error("ExecuteOptimisticBlock called with invalid BaseBlock", "err", err) - return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("BaseBlock is invalid: %s", err.Error())) + return nil, status.Error(codes.InvalidArgument, shared.WrapError(err, "invalid BaseBlock").Error()) } if !o.SyncMethodsCalled() { @@ -193,13 +202,13 @@ func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlock(ctx context.Context, payload, err := o.Eth().Miner().BuildPayload(payloadAttributes) if err != nil { log.Error("failed to build payload", "err", err) - return nil, status.Errorf(codes.InvalidArgument, "Could not build block with provided txs: %v", err) + return nil, status.Errorf(codes.InvalidArgument, shared.WrapError(err, "failed to build payload").Error()) } block, err := engine.ExecutableDataToBlock(*payload.Resolve().ExecutionPayload, nil, nil) if err != nil { log.Error("failed to convert executable data to block", err) - return nil, status.Error(codes.Internal, "failed to execute block") + return nil, status.Error(codes.Internal, shared.WrapError(err, "failed to convert executable data to block").Error()) } // this will insert the optimistic block into the chain and persist it's state without @@ -207,7 +216,7 @@ func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlock(ctx context.Context, err = o.Bc().InsertBlockWithoutSetHead(block) if err != nil { log.Error("failed to insert block to chain", "hash", block.Hash(), "prevHash", block.ParentHash(), "err", err) - return nil, status.Error(codes.Internal, "failed to insert block to chain") + return nil, status.Error(codes.Internal, shared.WrapError(err, "failed to insert block to chain").Error()) } // we store a pointer to the optimistic block in the chain so that we can use it diff --git a/grpc/optimistic/server_test.go b/grpc/optimistic/server_test.go index ff2359520..b0dd97720 100644 --- a/grpc/optimistic/server_test.go +++ b/grpc/optimistic/server_test.go @@ -334,7 +334,7 @@ func TestNewOptimisticServiceServerV1Alpha_StreamBundles(t *testing.T) { select { case err := <-errorCh: - require.ErrorContains(t, err, "error waiting for pending transactions") + require.ErrorContains(t, err, "tx pool subscription closed") } require.Len(t, mockServerSideStreaming.sentResponses, 5, "Number of responses should match the number of requests") diff --git a/grpc/shared/validation.go b/grpc/shared/validation.go index b6cd82839..e0bd7ea84 100644 --- a/grpc/shared/validation.go +++ b/grpc/shared/validation.go @@ -7,6 +7,7 @@ import ( "bytes" "crypto/ed25519" "crypto/sha256" + "errors" "fmt" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/contracts" @@ -14,10 +15,13 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/params" "github.com/golang/protobuf/proto" - "github.com/pkg/errors" "math/big" ) +func WrapError(err error, msg string) error { + return fmt.Errorf("%s: %w", msg, err) +} + func protoU128ToBigInt(u128 *primitivev1.Uint128) *big.Int { lo := big.NewInt(0).SetUint64(u128.Lo) hi := big.NewInt(0).SetUint64(u128.Hi) @@ -124,20 +128,20 @@ func unmarshallAllocationTxs(allocation *bundlev1alpha1.Allocation, prevBlockHas publicKey := ed25519.PublicKey(allocation.GetPublicKey()) bech32Address, 
err := EncodeFromPublicKey(addressPrefix, publicKey) if err != nil { - return nil, errors.Wrapf(err, "failed to encode public key to bech32m address: %s", publicKey) + return nil, WrapError(err, fmt.Sprintf("failed to encode public key to bech32m address: %s", publicKey)) } if auctioneerBech32Address != bech32Address { - return nil, errors.Errorf("address in allocation does not match auctioneer address. expected: %s, got: %s", auctioneerBech32Address, bech32Address) + return nil, fmt.Errorf("address in allocation does not match auctioneer address. expected: %s, got: %s", auctioneerBech32Address, bech32Address) } message, err := proto.Marshal(allocation.GetPayload()) if err != nil { - return nil, errors.Wrap(err, "failed to marshal allocation") + return nil, WrapError(err, "failed to marshal allocation") } signature := allocation.GetSignature() if !ed25519.Verify(publicKey, message, signature) { - return nil, errors.New("failed to verify signature") + return nil, fmt.Errorf("failed to verify signature") } // unmarshall the transactions in the bundle @@ -145,7 +149,7 @@ func unmarshallAllocationTxs(allocation *bundlev1alpha1.Allocation, prevBlockHas ethtx := new(types.Transaction) err := ethtx.UnmarshalBinary(allocationTx) if err != nil { - return nil, errors.Wrap(err, "failed to unmarshall allocation transaction") + return nil, WrapError(err, "failed to unmarshall allocation transaction") } processedTxs = append(processedTxs, ethtx) } From 1457ce3bccfd5acd122c4b6e9338ae441dcee216 Mon Sep 17 00:00:00 2001 From: Bharath Date: Fri, 3 Jan 2025 12:04:09 +0530 Subject: [PATCH 73/79] buffer the reserved addresses and remove them at once --- core/txpool/legacypool/legacypool.go | 16 +++++++++++----- core/txpool/legacypool/legacypool_test.go | 20 +++++++------------- grpc/shared/validation.go | 7 +++++-- grpc/shared/validation_test.go | 2 +- 4 files changed, 24 insertions(+), 21 deletions(-) diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index 81dfb958e..46e594d7d 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -1782,9 +1782,12 @@ func (pool *LegacyPool) truncateQueue() { // it assumes that the pool lock is being held func (pool *LegacyPool) clearPendingAndQueued(newHead *types.Header) { // Iterate over all accounts and demote any non-executable transactions + addrsForWhichTxsRemoved := []common.Address{} + for addr, list := range pool.pending { dropped, invalids := list.ClearList() - pendingGauge.Dec(int64(len(dropped) + len(invalids))) + + pendingGauge.Dec(int64(dropped.Len() + invalids.Len())) for _, tx := range dropped { pool.all.Remove(tx.Hash()) @@ -1796,12 +1799,14 @@ func (pool *LegacyPool) clearPendingAndQueued(newHead *types.Header) { if list.Empty() { delete(pool.pending, addr) delete(pool.beats, addr) + + addrsForWhichTxsRemoved = append(addrsForWhichTxsRemoved, addr) } } for addr, list := range pool.queue { dropped, invalids := list.ClearList() - queuedGauge.Dec(int64(len(dropped) + len(invalids))) + queuedGauge.Dec(int64(dropped.Len() + invalids.Len())) for _, tx := range dropped { pool.all.Remove(tx.Hash()) @@ -1811,12 +1816,13 @@ func (pool *LegacyPool) clearPendingAndQueued(newHead *types.Header) { } if list.Empty() { - if _, ok := pool.queue[addr]; !ok { - pool.reserve(addr, false) - } delete(pool.queue, addr) } } + + for _, addr := range addrsForWhichTxsRemoved { + pool.reserve(addr, false) + } } // demoteUnexecutables removes invalid and processed transactions from the pools diff --git 
a/core/txpool/legacypool/legacypool_test.go b/core/txpool/legacypool/legacypool_test.go index 00620ec68..a1395bebc 100644 --- a/core/txpool/legacypool/legacypool_test.go +++ b/core/txpool/legacypool/legacypool_test.go @@ -689,21 +689,15 @@ func TestDropping(t *testing.T) { tx11 = transaction(11, 200, key) tx12 = transaction(12, 300, key) ) - pool.all.Add(tx0, false) - pool.priced.Put(tx0, false) - pool.promoteTx(account, tx0.Hash(), tx0) - pool.all.Add(tx1, false) - pool.priced.Put(tx1, false) - pool.promoteTx(account, tx1.Hash(), tx1) + pool.add(tx0, false) + pool.add(tx1, false) + pool.add(tx2, false) + pool.add(tx10, false) + pool.add(tx11, false) + pool.add(tx12, false) - pool.all.Add(tx2, false) - pool.priced.Put(tx2, false) - pool.promoteTx(account, tx2.Hash(), tx2) - - pool.enqueueTx(tx10.Hash(), tx10, false, true) - pool.enqueueTx(tx11.Hash(), tx11, false, true) - pool.enqueueTx(tx12.Hash(), tx12, false, true) + pool.promoteExecutables([]common.Address{account}) // Check that pre and post validations leave the pool as is if pool.pending[account].Len() != 3 { diff --git a/grpc/shared/validation.go b/grpc/shared/validation.go index e0bd7ea84..2848846e8 100644 --- a/grpc/shared/validation.go +++ b/grpc/shared/validation.go @@ -118,6 +118,7 @@ func validateAndUnmarshallSequenceAction(tx *sequencerblockv1.RollupData) (*type } func unmarshallAllocationTxs(allocation *bundlev1alpha1.Allocation, prevBlockHash []byte, auctioneerBech32Address string, addressPrefix string) (types.Transactions, error) { + log.Info("Found a potential allocation in the rollup data. Checking if it is valid.") processedTxs := types.Transactions{} payload := allocation.GetPayload() @@ -136,14 +137,15 @@ func unmarshallAllocationTxs(allocation *bundlev1alpha1.Allocation, prevBlockHas message, err := proto.Marshal(allocation.GetPayload()) if err != nil { - return nil, WrapError(err, "failed to marshal allocation") + return nil, WrapError(err, "failed to marshal allocation to verify signature") } signature := allocation.GetSignature() if !ed25519.Verify(publicKey, message, signature) { - return nil, fmt.Errorf("failed to verify signature") + return nil, fmt.Errorf("signature in allocation does not match the public key") } + log.Info("Allocation is valid. Unmarshalling the transactions in the bundle.") // unmarshall the transactions in the bundle for _, allocationTx := range payload.GetTransactions() { ethtx := new(types.Transaction) @@ -162,6 +164,7 @@ func unmarshallAllocationTxs(allocation *bundlev1alpha1.Allocation, prevBlockHas // TODO - this function has become too big. 
we should start breaking it down func UnbundleRollupDataTransactions(txs []*sequencerblockv1.RollupData, height uint64, bridgeAddresses map[string]*params.AstriaBridgeAddressConfig, bridgeAllowedAssets map[string]struct{}, prevBlockHash []byte, auctioneerBech32Address string, addressPrefix string) types.Transactions { + processedTxs := types.Transactions{} allocationTxs := types.Transactions{} // we just return the allocation here and do not unmarshall the transactions in the bundle if we find it diff --git a/grpc/shared/validation_test.go b/grpc/shared/validation_test.go index 57c404366..a917bc2c4 100644 --- a/grpc/shared/validation_test.go +++ b/grpc/shared/validation_test.go @@ -159,7 +159,7 @@ func TestUnmarshallAllocationTxs(t *testing.T) { }, prevBlockHash: []byte("prev rollup block hash"), expectedOutput: types.Transactions{}, - wantErr: "failed to verify signature", + wantErr: "signature in allocation does not match the public key", }, { description: "valid allocation", From b6e54014694c4d126fa5102e10947add79051f15 Mon Sep 17 00:00:00 2001 From: Bharath Date: Mon, 6 Jan 2025 11:34:53 +0530 Subject: [PATCH 74/79] avoid cleaning up duplicate addresses --- core/txpool/legacypool/legacypool.go | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/core/txpool/legacypool/legacypool.go b/core/txpool/legacypool/legacypool.go index 46e594d7d..b1397292a 100644 --- a/core/txpool/legacypool/legacypool.go +++ b/core/txpool/legacypool/legacypool.go @@ -1782,7 +1782,7 @@ func (pool *LegacyPool) truncateQueue() { // it assumes that the pool lock is being held func (pool *LegacyPool) clearPendingAndQueued(newHead *types.Header) { // Iterate over all accounts and demote any non-executable transactions - addrsForWhichTxsRemoved := []common.Address{} + addrsForWhichTxsRemoved := map[common.Address]bool{} for addr, list := range pool.pending { dropped, invalids := list.ClearList() @@ -1800,7 +1800,7 @@ func (pool *LegacyPool) clearPendingAndQueued(newHead *types.Header) { delete(pool.pending, addr) delete(pool.beats, addr) - addrsForWhichTxsRemoved = append(addrsForWhichTxsRemoved, addr) + addrsForWhichTxsRemoved[addr] = true } } @@ -1817,10 +1817,12 @@ func (pool *LegacyPool) clearPendingAndQueued(newHead *types.Header) { if list.Empty() { delete(pool.queue, addr) + + addrsForWhichTxsRemoved[addr] = true } } - for _, addr := range addrsForWhichTxsRemoved { + for addr := range addrsForWhichTxsRemoved { pool.reserve(addr, false) } } From 1cd5701e900006f240bf1e02d1fa1706b53f7749 Mon Sep 17 00:00:00 2001 From: Bharath Date: Fri, 3 Jan 2025 14:34:39 +0530 Subject: [PATCH 75/79] add metrics --- grpc/optimistic/server.go | 18 ++++++++++++------ grpc/shared/validation.go | 22 +++++++++++++++++++++- 2 files changed, 33 insertions(+), 7 deletions(-) diff --git a/grpc/optimistic/server.go b/grpc/optimistic/server.go index a56c0d8ac..4d229c9f6 100644 --- a/grpc/optimistic/server.go +++ b/grpc/optimistic/server.go @@ -38,6 +38,8 @@ type OptimisticServiceV1Alpha1 struct { var ( executeOptimisticBlockRequestCount = metrics.GetOrRegisterCounter("astria/optimistic/execute_optimistic_block_requests", nil) executeOptimisticBlockSuccessCount = metrics.GetOrRegisterCounter("astria/optimistic/execute_optimistic_block_success", nil) + optimisticBlockHeight = metrics.GetOrRegisterGauge("astria/execution/optimistic_block_height", nil) + txsStreamedCount = metrics.GetOrRegisterCounter("astria/optimistic/txs_streamed", nil) executionOptimisticBlockTimer = 
metrics.GetOrRegisterTimer("astria/optimistic/execute_optimistic_block_time", nil) ) @@ -85,6 +87,7 @@ func (o *OptimisticServiceV1Alpha1) GetBundleStream(_ *optimsticPb.GetBundleStre bundle.BaseSequencerBlockHash = *o.currentOptimisticSequencerBlock.Load() bundle.PrevRollupBlockHash = optimisticBlock.Hash().Bytes() + txsStreamedCount.Inc(1) err = stream.Send(&optimsticPb.GetBundleStreamResponse{Bundle: &bundle}) if err != nil { log.Error("error sending bundle over stream", "err", err) @@ -124,6 +127,8 @@ func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlockStream(stream optimist return err } + executeOptimisticBlockRequestCount.Inc(1) + baseBlock := msg.GetBaseBlock() // execute the optimistic block and wait for the mempool clearing event @@ -140,6 +145,7 @@ func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlockStream(stream optimist return status.Error(codes.Internal, "failed to clear mempool after optimistic block execution") } o.currentOptimisticSequencerBlock.Store(&baseBlock.SequencerBlockHash) + executeOptimisticBlockSuccessCount.Inc(1) err = stream.Send(&optimsticPb.ExecuteOptimisticBlockStreamResponse{ Block: optimisticBlock, BaseSequencerBlockHash: baseBlock.SequencerBlockHash, @@ -164,7 +170,10 @@ func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlockStream(stream optimist func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlock(ctx context.Context, req *optimsticPb.BaseBlock) (*astriaPb.Block, error) { // we need to execute the optimistic block log.Debug("ExecuteOptimisticBlock called", "timestamp", req.Timestamp, "sequencer_block_hash", req.SequencerBlockHash) - executeOptimisticBlockRequestCount.Inc(1) + + // Deliberately called after lock, to more directly measure the time spent executing + executionStart := time.Now() + defer executionOptimisticBlockTimer.UpdateSince(executionStart) if err := validateStaticExecuteOptimisticBlockRequest(req); err != nil { log.Error("ExecuteOptimisticBlock called with invalid BaseBlock", "err", err) @@ -175,10 +184,6 @@ func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlock(ctx context.Context, return nil, status.Error(codes.PermissionDenied, "Cannot execute block until GetGenesisInfo && GetCommitmentState methods are called") } - // Deliberately called after lock, to more directly measure the time spent executing - executionStart := time.Now() - defer executionOptimisticBlockTimer.UpdateSince(executionStart) - softBlock := o.Bc().CurrentSafeBlock() nextFeeRecipient := o.NextFeeRecipient() @@ -234,8 +239,9 @@ func (o *OptimisticServiceV1Alpha1) ExecuteOptimisticBlock(ctx context.Context, }, } + optimisticBlockHeight.Update(int64(block.NumberU64())) + log.Info("ExecuteOptimisticBlock completed", "block_num", res.Number, "timestamp", res.Timestamp) - executeOptimisticBlockSuccessCount.Inc(1) return res, nil } diff --git a/grpc/shared/validation.go b/grpc/shared/validation.go index 2848846e8..30357cb2e 100644 --- a/grpc/shared/validation.go +++ b/grpc/shared/validation.go @@ -13,9 +13,20 @@ import ( "github.com/ethereum/go-ethereum/contracts" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/params" "github.com/golang/protobuf/proto" "math/big" + "time" +) + +var ( + successfulUnbundledAllocations = metrics.GetOrRegisterGauge("astria/optimistic/successful_unbundled_allocations", nil) + allocationsWithInvalidPrevBlockHash = metrics.GetOrRegisterGauge("astria/optimistic/allocations_with_invalid_prev_block_hash", nil) + 
allocationsWithInvalidPubKey = metrics.GetOrRegisterGauge("astria/optimistic/allocations_with_invalid_pub_key", nil) + allocationsWithInvalidSignature = metrics.GetOrRegisterGauge("astria/optimistic/allocations_with_invalid_signature", nil) + + allocationUnbundlingTimer = metrics.GetOrRegisterTimer("astria/optimistic/allocation_unbundling_time", nil) ) func WrapError(err error, msg string) error { @@ -118,11 +129,16 @@ func validateAndUnmarshallSequenceAction(tx *sequencerblockv1.RollupData) (*type } func unmarshallAllocationTxs(allocation *bundlev1alpha1.Allocation, prevBlockHash []byte, auctioneerBech32Address string, addressPrefix string) (types.Transactions, error) { - log.Info("Found a potential allocation in the rollup data. Checking if it is valid.") + unbundlingStart := time.Now() + defer allocationUnbundlingTimer.UpdateSince(unbundlingStart) + processedTxs := types.Transactions{} payload := allocation.GetPayload() + log.Info("Found a potential allocation in the rollup data. Checking if it is valid.") + if !bytes.Equal(payload.PrevRollupBlockHash, prevBlockHash) { + allocationsWithInvalidPrevBlockHash.Inc(1) return nil, errors.New("prev block hash do not match in allocation") } @@ -132,6 +148,7 @@ func unmarshallAllocationTxs(allocation *bundlev1alpha1.Allocation, prevBlockHas return nil, WrapError(err, fmt.Sprintf("failed to encode public key to bech32m address: %s", publicKey)) } if auctioneerBech32Address != bech32Address { + allocationsWithInvalidPubKey.Inc(1) return nil, fmt.Errorf("address in allocation does not match auctioneer address. expected: %s, got: %s", auctioneerBech32Address, bech32Address) } @@ -142,6 +159,7 @@ func unmarshallAllocationTxs(allocation *bundlev1alpha1.Allocation, prevBlockHas signature := allocation.GetSignature() if !ed25519.Verify(publicKey, message, signature) { + allocationsWithInvalidSignature.Inc(1) return nil, fmt.Errorf("signature in allocation does not match the public key") } @@ -156,6 +174,8 @@ func unmarshallAllocationTxs(allocation *bundlev1alpha1.Allocation, prevBlockHas processedTxs = append(processedTxs, ethtx) } + successfulUnbundledAllocations.Inc(1) + return processedTxs, nil } From b555cc9ce6469fa18507daf35753cde0bba19d91 Mon Sep 17 00:00:00 2001 From: Bharath Date: Thu, 9 Jan 2025 15:38:19 +0530 Subject: [PATCH 76/79] update depot token --- .github/workflows/astria-build-and-publish-image.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/astria-build-and-publish-image.yml b/.github/workflows/astria-build-and-publish-image.yml index 73654354e..3329474bb 100644 --- a/.github/workflows/astria-build-and-publish-image.yml +++ b/.github/workflows/astria-build-and-publish-image.yml @@ -72,4 +72,4 @@ jobs: push: true tags: ${{ steps.metadata.outputs.tags }} labels: ${{ steps.metadata.outputs.labels }} - project: w2d6w0spqz \ No newline at end of file + project: w2d6w0spqz From 3284998700febf70570747e71901e6a987fce7d0 Mon Sep 17 00:00:00 2001 From: Bharath Date: Fri, 10 Jan 2025 16:18:08 +0530 Subject: [PATCH 77/79] add a few debug logs --- grpc/shared/validation.go | 9 +++++---- grpc/shared/validation_test.go | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/grpc/shared/validation.go b/grpc/shared/validation.go index 30357cb2e..f7464f8d3 100644 --- a/grpc/shared/validation.go +++ b/grpc/shared/validation.go @@ -135,11 +135,11 @@ func unmarshallAllocationTxs(allocation *bundlev1alpha1.Allocation, prevBlockHas processedTxs := types.Transactions{} payload := allocation.GetPayload() - 
log.Info("Found a potential allocation in the rollup data. Checking if it is valid.") + log.Debug("Found a potential allocation in the rollup data. Checking if it is valid.", "prevBlockHash", common.BytesToHash(prevBlockHash).String(), "auctioneerBech32Address", auctioneerBech32Address) - if !bytes.Equal(payload.PrevRollupBlockHash, prevBlockHash) { + if !bytes.Equal(payload.GetPrevRollupBlockHash(), prevBlockHash) { allocationsWithInvalidPrevBlockHash.Inc(1) - return nil, errors.New("prev block hash do not match in allocation") + return nil, errors.New("prev block hash in allocation does not match the previous block hash") } publicKey := ed25519.PublicKey(allocation.GetPublicKey()) @@ -147,6 +147,7 @@ func unmarshallAllocationTxs(allocation *bundlev1alpha1.Allocation, prevBlockHas if err != nil { return nil, WrapError(err, fmt.Sprintf("failed to encode public key to bech32m address: %s", publicKey)) } + if auctioneerBech32Address != bech32Address { allocationsWithInvalidPubKey.Inc(1) return nil, fmt.Errorf("address in allocation does not match auctioneer address. expected: %s, got: %s", auctioneerBech32Address, bech32Address) @@ -163,7 +164,7 @@ func unmarshallAllocationTxs(allocation *bundlev1alpha1.Allocation, prevBlockHas return nil, fmt.Errorf("signature in allocation does not match the public key") } - log.Info("Allocation is valid. Unmarshalling the transactions in the bundle.") + log.Debug("Allocation is valid. Unmarshalling the transactions in the bundle.") // unmarshall the transactions in the bundle for _, allocationTx := range payload.GetTransactions() { ethtx := new(types.Transaction) diff --git a/grpc/shared/validation_test.go b/grpc/shared/validation_test.go index a917bc2c4..4bf274090 100644 --- a/grpc/shared/validation_test.go +++ b/grpc/shared/validation_test.go @@ -127,7 +127,7 @@ func TestUnmarshallAllocationTxs(t *testing.T) { }, prevBlockHash: []byte("not prev rollup block hash"), expectedOutput: types.Transactions{}, - wantErr: "prev block hash do not match in allocation", + wantErr: "prev block hash in allocation does not match the previous block hash", }, { description: "public key doesn't match", From 17dbebd1e828b5a81f6561657c6885d7e4a25ad1 Mon Sep 17 00:00:00 2001 From: Bharath Date: Mon, 13 Jan 2025 11:23:23 +0530 Subject: [PATCH 78/79] update to use new protos --- cmd/utils/flags.go | 6 +- go.mod | 10 ++-- go.sum | 18 ++++++ grpc/optimistic/server.go | 26 ++++----- grpc/optimistic/server_test.go | 20 +++---- grpc/optimistic/validation.go | 2 +- grpc/shared/validation.go | 16 +++--- grpc/shared/validation_test.go | 100 ++++++++++++++++----------------- node/grpcstack.go | 10 ++-- 9 files changed, 113 insertions(+), 95 deletions(-) diff --git a/cmd/utils/flags.go b/cmd/utils/flags.go index 15fd5e580..f2410a9ce 100644 --- a/cmd/utils/flags.go +++ b/cmd/utils/flags.go @@ -18,7 +18,7 @@ package utils import ( - optimisticGrpc "buf.build/gen/go/astria/execution-apis/grpc/go/astria/bundle/v1alpha1/bundlev1alpha1grpc" + optimisticGrpc "buf.build/gen/go/astria/execution-apis/grpc/go/astria/auction/v1alpha1/auctionv1alpha1grpc" "context" "crypto/ecdsa" "encoding/hex" @@ -2003,8 +2003,8 @@ func RegisterGraphQLService(stack *node.Node, backend ethapi.Backend, filterSyst // RegisterGRPCServices adds the gRPC API to the node. 
// It was done this way so that our grpc execution server can access the ethapi.Backend -func RegisterGRPCServices(stack *node.Node, execServ astriaGrpc.ExecutionServiceServer, optimisticExecutionServ optimisticGrpc.OptimisticExecutionServiceServer, bundleStreamingServ optimisticGrpc.BundleServiceServer, cfg *node.Config) { - if err := node.NewGRPCServerHandler(stack, execServ, optimisticExecutionServ, bundleStreamingServ, cfg); err != nil { +func RegisterGRPCServices(stack *node.Node, execServ astriaGrpc.ExecutionServiceServer, optimisticExecutionServ optimisticGrpc.OptimisticExecutionServiceServer, auctionServiceServer optimisticGrpc.AuctionServiceServer, cfg *node.Config) { + if err := node.NewGRPCServerHandler(stack, execServ, optimisticExecutionServ, auctionServiceServer, cfg); err != nil { Fatalf("Failed to register the gRPC service: %v", err) } } diff --git a/go.mod b/go.mod index 44aeb9066..c9445016e 100644 --- a/go.mod +++ b/go.mod @@ -3,10 +3,10 @@ module github.com/ethereum/go-ethereum go 1.21 require ( - buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-e09c7fd3fe26.1 - buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.35.2-00000000000000-e09c7fd3fe26.1 - buf.build/gen/go/astria/primitives/protocolbuffers/go v1.35.2-00000000000000-2f2e9ce53f59.1 - buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.35.2-00000000000000-0eda7df0ee38.1 + buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-1f40f333891d.2 + buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.36.2-00000000000000-1f40f333891d.1 + buf.build/gen/go/astria/primitives/protocolbuffers/go v1.36.2-00000000000000-9a039a6ed8db.1 + buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.36.2-00000000000000-e54e1c9ad405.1 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 github.com/Microsoft/go-winio v0.6.1 github.com/VictoriaMetrics/fastcache v1.12.1 @@ -79,7 +79,7 @@ require ( golang.org/x/time v0.5.0 golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d google.golang.org/grpc v1.64.1 - google.golang.org/protobuf v1.35.2 + google.golang.org/protobuf v1.36.2 gopkg.in/natefinch/lumberjack.v2 v2.0.0 gopkg.in/yaml.v3 v3.0.1 ) diff --git a/go.sum b/go.sum index 5ad9748dd..78e748f2d 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,5 @@ +buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-1f40f333891d.2 h1:9rMXnvPR2EX56tMIqbhOK+DvqKjWb++p5s1/bookIl8= +buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-1f40f333891d.2/go.mod h1:hdCXwnxpMeoqXK5LCQ6gLMcmMLUDX8T9+hbxYrtj+wQ= buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-cc31a327d543.1 h1:wOry49zAbse0G4mt2tFTwa4P2AUMuYCR/0mYcPrpcbs= buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-cc31a327d543.1/go.mod h1:+pVCkEpJNp2JtooS8NiydT7bO9+hu11XUZ5Z47DPtXo= buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-e09c7fd3fe26.1 h1:gS4erruX5XeMN0MZ7xe4JmEIR3uCWrvzG5HGV725WiI= @@ -10,14 +12,26 @@ buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.35.1-20241017141511 buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.35.1-20241017141511-7e4bcc0ebba5.1/go.mod h1:U4LUlabiYNYBd1pqYS9o8SsHjBRoEBysrfRVnebzJH0= buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.35.2-00000000000000-e09c7fd3fe26.1 h1:Twi169wrd7ssCnK27Bymlytv5LmvwFV0zhKhJ64nCYM= buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.35.2-00000000000000-e09c7fd3fe26.1/go.mod h1:PWzMbPHJ+Y31iNFrtSc5vy/wvm2805ZXyDZndzzFLa0= 
+buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.36.1-00000000000000-1f40f333891d.1 h1:CSMft5/33d/88j3ziC4zid4DOP7X1Xv71I6pW3BUOvA= +buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.36.1-00000000000000-1f40f333891d.1/go.mod h1:7azHjtjY3sk38xuZGlf2X6DpAPgQMoeZZMix+JkqsdU= +buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.36.2-00000000000000-1f40f333891d.1 h1:cRvRFDg3/KPgEB2+8/orNwCWBhZO0wVZKij4TTKBj9w= +buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.36.2-00000000000000-1f40f333891d.1/go.mod h1:oB3M+Fq9RgyUWGMqYk2FqRobQpdH1yZQZ9TYOoc4yIw= buf.build/gen/go/astria/primitives/protocolbuffers/go v1.35.1-20240911152449-eeebd3decdce.1 h1:kG4riHqlF9X6iZ1Oxs5/6ul6aue7MS+A6DK6HAchuTk= buf.build/gen/go/astria/primitives/protocolbuffers/go v1.35.1-20240911152449-eeebd3decdce.1/go.mod h1:n9L7X3VAj4od4VHf2ScJuHARUUQTSxJqtRHZk/7Ptt0= buf.build/gen/go/astria/primitives/protocolbuffers/go v1.35.2-00000000000000-2f2e9ce53f59.1 h1:C1bT0G1In6Z6tBERd1XqwDjdxTK+PatSOJYlVk5Is60= buf.build/gen/go/astria/primitives/protocolbuffers/go v1.35.2-00000000000000-2f2e9ce53f59.1/go.mod h1:I9FcB1oNqT1nI+ny0GD8gF9YrIYrHmczgNu6MTE9fAo= +buf.build/gen/go/astria/primitives/protocolbuffers/go v1.36.1-00000000000000-9a039a6ed8db.1 h1:v+RKpd5zE6rqOMA44OLRpDLPYlakjmddvmFFrKxzb48= +buf.build/gen/go/astria/primitives/protocolbuffers/go v1.36.1-00000000000000-9a039a6ed8db.1/go.mod h1:HnX2FkSKZuD3zPFBR+Q17WzloqvIbFd0pYE++or/x2Q= +buf.build/gen/go/astria/primitives/protocolbuffers/go v1.36.2-00000000000000-9a039a6ed8db.1 h1:inT/lOAbHunpGP9YLqtAQNssrxEIgH/OmxXNwbXjUqs= +buf.build/gen/go/astria/primitives/protocolbuffers/go v1.36.2-00000000000000-9a039a6ed8db.1/go.mod h1:Lk1TBSGhOGvbtj0lb7eTeq+Z4N86/67Ay+WWxbqhh6s= buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.35.1-20241017141511-71aab1871615.1 h1:hPMoxTiT7jJjnIbWqneBbL05VeVOTD9UeC/qdvzHL8g= buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.35.1-20241017141511-71aab1871615.1/go.mod h1:2uasRFMH+a3DaF34c1o+w7/YtYnoknmARyYpb9W2QIc= buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.35.2-00000000000000-0eda7df0ee38.1 h1:uJm/22xugluY5AL2NkIDbNEFBxzN6UcI8vts/bGEDBs= buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.35.2-00000000000000-0eda7df0ee38.1/go.mod h1:1Z9P18WNTOT+KvLlc0+2FkcBJ7l5eRUUFcnOxHmLeRA= +buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.36.1-00000000000000-e54e1c9ad405.1 h1:querphz/TCGphT0qGG4DJo6p8qAsfL5/8SEBgfemVhk= +buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.36.1-00000000000000-e54e1c9ad405.1/go.mod h1:D6ou7OxkQXmiZDDNNrT147dA9wC9rhJPchCIfVbw9wM= +buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.36.2-00000000000000-e54e1c9ad405.1 h1:n2embOKwJS+YIyjHRDvOAo7c/kuv3fw9U+gQ/g2Yis8= +buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.36.2-00000000000000-e54e1c9ad405.1/go.mod h1:dHPKfn7RW6FSo7EkD0LqPhZUmRm5NXMB+tWvTrTnZTQ= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -923,6 +937,10 @@ google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFyt google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io= google.golang.org/protobuf v1.35.2/go.mod 
h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= +google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.2 h1:R8FeyR1/eLmkutZOM5CWghmo5itiG9z0ktFlTVLuTmU= +google.golang.org/protobuf v1.36.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/grpc/optimistic/server.go b/grpc/optimistic/server.go index 4d229c9f6..52cf8116c 100644 --- a/grpc/optimistic/server.go +++ b/grpc/optimistic/server.go @@ -1,8 +1,8 @@ package optimistic import ( - optimisticGrpc "buf.build/gen/go/astria/execution-apis/grpc/go/astria/bundle/v1alpha1/bundlev1alpha1grpc" - optimsticPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/bundle/v1alpha1" + optimisticGrpc "buf.build/gen/go/astria/execution-apis/grpc/go/astria/auction/v1alpha1/auctionv1alpha1grpc" + optimsticPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/auction/v1alpha1" astriaPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/execution/v1" "context" "errors" @@ -28,7 +28,7 @@ import ( type OptimisticServiceV1Alpha1 struct { optimisticGrpc.UnimplementedOptimisticExecutionServiceServer - optimisticGrpc.UnimplementedBundleServiceServer + optimisticGrpc.UnimplementedAuctionServiceServer sharedServiceContainer *shared.SharedServiceContainer @@ -54,8 +54,8 @@ func NewOptimisticServiceV1Alpha(sharedServiceContainer *shared.SharedServiceCon return optimisticService } -func (o *OptimisticServiceV1Alpha1) GetBundleStream(_ *optimsticPb.GetBundleStreamRequest, stream optimisticGrpc.BundleService_GetBundleStreamServer) error { - log.Debug("GetBundleStream called") +func (o *OptimisticServiceV1Alpha1) GetBidStream(_ *optimsticPb.GetBidStreamRequest, stream optimisticGrpc.AuctionService_GetBidStreamServer) error { + log.Debug("GetBidStream called") pendingTxEventCh := make(chan core.NewTxsEvent) pendingTxEvent := o.Eth().TxPool().SubscribeTransactions(pendingTxEventCh, false) @@ -69,7 +69,7 @@ func (o *OptimisticServiceV1Alpha1) GetBundleStream(_ *optimsticPb.GetBundleStre optimisticBlock := o.Eth().BlockChain().CurrentOptimisticBlock() for _, pendingTx := range pendingTxs.Txs { - bundle := optimsticPb.Bundle{} + bid := optimsticPb.Bid{} totalCost := big.NewInt(0) effectiveTip := cmath.BigMin(pendingTx.GasTipCap(), new(big.Int).Sub(pendingTx.GasFeeCap(), optimisticBlock.BaseFee)) @@ -82,16 +82,16 @@ func (o *OptimisticServiceV1Alpha1) GetBundleStream(_ *optimsticPb.GetBundleStre } marshalledTxs = append(marshalledTxs, marshalledTx) - bundle.Fee = totalCost.Uint64() - bundle.Transactions = marshalledTxs - bundle.BaseSequencerBlockHash = *o.currentOptimisticSequencerBlock.Load() - bundle.PrevRollupBlockHash = optimisticBlock.Hash().Bytes() + bid.Fee = totalCost.Uint64() + bid.Transactions = marshalledTxs + bid.SequencerParentBlockHash = *o.currentOptimisticSequencerBlock.Load() + bid.RollupParentBlockHash = optimisticBlock.Hash().Bytes() txsStreamedCount.Inc(1) - err = stream.Send(&optimsticPb.GetBundleStreamResponse{Bundle: &bundle}) + err = stream.Send(&optimsticPb.GetBidStreamResponse{Bid: &bid}) if err != nil { - log.Error("error sending bundle 
over stream", "err", err) - return status.Error(codes.Internal, shared.WrapError(err, "error sending bundle over stream").Error()) + log.Error("error sending bid over stream", "err", err) + return status.Error(codes.Internal, shared.WrapError(err, "error sending bid over stream").Error()) } } diff --git a/grpc/optimistic/server_test.go b/grpc/optimistic/server_test.go index b0dd97720..538b0433b 100644 --- a/grpc/optimistic/server_test.go +++ b/grpc/optimistic/server_test.go @@ -1,7 +1,7 @@ package optimistic import ( - optimsticPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/bundle/v1alpha1" + optimsticPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/auction/v1alpha1" astriaPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/execution/v1" primitivev1 "buf.build/gen/go/astria/primitives/protocolbuffers/go/astria/primitive/v1" sequencerblockv1 "buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go/astria/sequencerblock/v1" @@ -193,7 +193,7 @@ func TestOptimisticServiceServerV1Alpha1_ExecuteOptimisticBlock(t *testing.T) { } } -func TestNewOptimisticServiceServerV1Alpha_StreamBundles(t *testing.T) { +func TestNewOptimisticServiceServerV1Alpha_StreamBids(t *testing.T) { ethservice, sharedService, _, _ := shared.SetupSharedService(t, 10) optimisticServiceV1Alpha1 := SetupOptimisticService(t, sharedService) @@ -286,13 +286,13 @@ func TestNewOptimisticServiceServerV1Alpha_StreamBundles(t *testing.T) { require.Equal(t, pending, 0, "Mempool should have 0 pending txs") require.Equal(t, queued, 0, "Mempool should have 0 queued txs") - mockServerSideStreaming := MockServerSideStreaming[optimsticPb.GetBundleStreamResponse]{ - sentResponses: []*optimsticPb.GetBundleStreamResponse{}, + mockServerSideStreaming := MockServerSideStreaming[optimsticPb.GetBidStreamResponse]{ + sentResponses: []*optimsticPb.GetBidStreamResponse{}, } errorCh = make(chan error) go func() { - errorCh <- optimisticServiceV1Alpha1.GetBundleStream(&optimsticPb.GetBundleStreamRequest{}, &mockServerSideStreaming) + errorCh <- optimisticServiceV1Alpha1.GetBidStream(&optimsticPb.GetBidStreamRequest{}, &mockServerSideStreaming) }() stateDb, err := ethservice.BlockChain().StateAt(currentOptimisticBlock.Root) @@ -341,19 +341,19 @@ func TestNewOptimisticServiceServerV1Alpha_StreamBundles(t *testing.T) { txIndx := 0 for _, resp := range mockServerSideStreaming.sentResponses { - bundle := resp.GetBundle() + bid := resp.GetBid() - require.Len(t, bundle.Transactions, 1, "Bundle should have 1 tx") + require.Len(t, bid.Transactions, 1, "Bid should have 1 tx") - receivedTx := bundle.Transactions[0] + receivedTx := bid.Transactions[0] sentTx := txs[txIndx] marshalledSentTx, err := sentTx.MarshalBinary() require.Nil(t, err, "Failed to marshal tx") require.True(t, bytes.Equal(receivedTx, marshalledSentTx), "Received tx does not match sent tx") txIndx += 1 - require.True(t, bytes.Equal(bundle.PrevRollupBlockHash, currentOptimisticBlock.Hash().Bytes()), "PrevRollupBlockHash should match the current optimistic block hash") - require.True(t, bytes.Equal(bundle.BaseSequencerBlockHash, *optimisticServiceV1Alpha1.currentOptimisticSequencerBlock.Load()), "BaseSequencerBlockHash should match the current optimistic sequencer block hash") + require.True(t, bytes.Equal(bid.RollupParentBlockHash, currentOptimisticBlock.Hash().Bytes()), "PrevRollupBlockHash should match the current optimistic block hash") + require.True(t, bytes.Equal(bid.SequencerParentBlockHash, 
*optimisticServiceV1Alpha1.currentOptimisticSequencerBlock.Load()), "BaseSequencerBlockHash should match the current optimistic sequencer block hash") } } diff --git a/grpc/optimistic/validation.go b/grpc/optimistic/validation.go index a59420d73..cbd6c62e6 100644 --- a/grpc/optimistic/validation.go +++ b/grpc/optimistic/validation.go @@ -1,7 +1,7 @@ package optimistic import ( - optimsticPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/bundle/v1alpha1" + optimsticPb "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/auction/v1alpha1" "fmt" ) diff --git a/grpc/shared/validation.go b/grpc/shared/validation.go index f7464f8d3..c90cadee7 100644 --- a/grpc/shared/validation.go +++ b/grpc/shared/validation.go @@ -1,7 +1,7 @@ package shared import ( - bundlev1alpha1 "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/bundle/v1alpha1" + auctionv1alpha1 "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/auction/v1alpha1" primitivev1 "buf.build/gen/go/astria/primitives/protocolbuffers/go/astria/primitive/v1" sequencerblockv1 "buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go/astria/sequencerblock/v1" "bytes" @@ -128,7 +128,7 @@ func validateAndUnmarshallSequenceAction(tx *sequencerblockv1.RollupData) (*type return ethTx, nil } -func unmarshallAllocationTxs(allocation *bundlev1alpha1.Allocation, prevBlockHash []byte, auctioneerBech32Address string, addressPrefix string) (types.Transactions, error) { +func unmarshallAllocationTxs(allocation *auctionv1alpha1.Allocation, prevBlockHash []byte, auctioneerBech32Address string, addressPrefix string) (types.Transactions, error) { unbundlingStart := time.Now() defer allocationUnbundlingTimer.UpdateSince(unbundlingStart) @@ -137,7 +137,7 @@ func unmarshallAllocationTxs(allocation *bundlev1alpha1.Allocation, prevBlockHas log.Debug("Found a potential allocation in the rollup data. Checking if it is valid.", "prevBlockHash", common.BytesToHash(prevBlockHash).String(), "auctioneerBech32Address", auctioneerBech32Address) - if !bytes.Equal(payload.GetPrevRollupBlockHash(), prevBlockHash) { + if !bytes.Equal(payload.GetRollupParentBlockHash(), prevBlockHash) { allocationsWithInvalidPrevBlockHash.Inc(1) return nil, errors.New("prev block hash in allocation does not match the previous block hash") } @@ -164,8 +164,8 @@ func unmarshallAllocationTxs(allocation *bundlev1alpha1.Allocation, prevBlockHas return nil, fmt.Errorf("signature in allocation does not match the public key") } - log.Debug("Allocation is valid. Unmarshalling the transactions in the bundle.") - // unmarshall the transactions in the bundle + log.Debug("Allocation is valid. 
Unmarshalling the transactions in the bid.") + // unmarshall the transactions in the bid for _, allocationTx := range payload.GetTransactions() { ethtx := new(types.Transaction) err := ethtx.UnmarshalBinary(allocationTx) @@ -188,8 +188,8 @@ func UnbundleRollupDataTransactions(txs []*sequencerblockv1.RollupData, height u processedTxs := types.Transactions{} allocationTxs := types.Transactions{} - // we just return the allocation here and do not unmarshall the transactions in the bundle if we find it - var allocation *bundlev1alpha1.Allocation + // we just return the allocation here and do not unmarshall the transactions in the bid if we find it + var allocation *auctionv1alpha1.Allocation for _, tx := range txs { if deposit := tx.GetDeposit(); deposit != nil { depositTx, err := validateAndUnmarshalDepositTx(deposit, height, bridgeAddresses, bridgeAllowedAssets) @@ -204,7 +204,7 @@ func UnbundleRollupDataTransactions(txs []*sequencerblockv1.RollupData, height u // check if sequence data is of type Allocation if allocation == nil { // TODO - check if we can avoid a temp value - tempAllocation := &bundlev1alpha1.Allocation{} + tempAllocation := &auctionv1alpha1.Allocation{} err := proto.Unmarshal(sequenceData, tempAllocation) if err == nil { unmarshalledAllocationTxs, err := unmarshallAllocationTxs(tempAllocation, prevBlockHash, auctioneerBech32Address, addressPrefix) diff --git a/grpc/shared/validation_test.go b/grpc/shared/validation_test.go index 4bf274090..3ae96a2e4 100644 --- a/grpc/shared/validation_test.go +++ b/grpc/shared/validation_test.go @@ -1,7 +1,7 @@ package shared import ( - bundlev1alpha1 "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/bundle/v1alpha1" + bidv1alpha1 "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/auction/v1alpha1" "bytes" "crypto/ecdsa" "crypto/ed25519" @@ -88,11 +88,11 @@ func TestUnmarshallAllocationTxs(t *testing.T) { validMarshalledTx3, err := tx3.MarshalBinary() require.NoError(t, err, "failed to marshal valid tx: %v", err) - validPayload := &bundlev1alpha1.Bundle{ - Fee: 100, - Transactions: [][]byte{validMarshalledTx1, validMarshalledTx2, validMarshalledTx3}, - BaseSequencerBlockHash: []byte("sequencer block hash"), - PrevRollupBlockHash: []byte("prev rollup block hash"), + validPayload := &bidv1alpha1.Bid{ + Fee: 100, + Transactions: [][]byte{validMarshalledTx1, validMarshalledTx2, validMarshalledTx3}, + SequencerParentBlockHash: []byte("sequencer block hash"), + RollupParentBlockHash: []byte("prev rollup block hash"), } marshalledAllocation, err := proto.Marshal(validPayload) @@ -106,7 +106,7 @@ func TestUnmarshallAllocationTxs(t *testing.T) { tests := []struct { description string - allocation *bundlev1alpha1.Allocation + allocation *bidv1alpha1.Allocation prevBlockHash []byte expectedOutput types.Transactions // just check if error contains the string since error contains other details @@ -114,15 +114,15 @@ func TestUnmarshallAllocationTxs(t *testing.T) { }{ { description: "previous block hash mismatch", - allocation: &bundlev1alpha1.Allocation{ + allocation: &bidv1alpha1.Allocation{ // TODO - add signature and public key validation Signature: make([]byte, 0), PublicKey: make([]byte, 0), - Payload: &bundlev1alpha1.Bundle{ - Fee: 100, - Transactions: [][]byte{[]byte("unmarshallable tx")}, - BaseSequencerBlockHash: []byte("sequencer block hash"), - PrevRollupBlockHash: []byte("prev rollup block hash"), + Payload: &bidv1alpha1.Bid{ + Fee: 100, + Transactions: [][]byte{[]byte("unmarshallable tx")}, + 
SequencerParentBlockHash: []byte("sequencer block hash"), + RollupParentBlockHash: []byte("prev rollup block hash"), }, }, prevBlockHash: []byte("not prev rollup block hash"), @@ -131,14 +131,14 @@ func TestUnmarshallAllocationTxs(t *testing.T) { }, { description: "public key doesn't match", - allocation: &bundlev1alpha1.Allocation{ + allocation: &bidv1alpha1.Allocation{ Signature: []byte("invalid signature"), PublicKey: []byte("invalid public key"), - Payload: &bundlev1alpha1.Bundle{ - Fee: 100, - Transactions: [][]byte{[]byte("unmarshallable tx")}, - BaseSequencerBlockHash: []byte("sequencer block hash"), - PrevRollupBlockHash: []byte("prev rollup block hash"), + Payload: &bidv1alpha1.Bid{ + Fee: 100, + Transactions: [][]byte{[]byte("unmarshallable tx")}, + SequencerParentBlockHash: []byte("sequencer block hash"), + RollupParentBlockHash: []byte("prev rollup block hash"), }, }, prevBlockHash: []byte("prev rollup block hash"), @@ -147,14 +147,14 @@ func TestUnmarshallAllocationTxs(t *testing.T) { }, { description: "invalid signature", - allocation: &bundlev1alpha1.Allocation{ + allocation: &bidv1alpha1.Allocation{ Signature: []byte("invalid signature"), PublicKey: auctioneerPubKey, - Payload: &bundlev1alpha1.Bundle{ - Fee: 100, - Transactions: [][]byte{[]byte("unmarshallable tx")}, - BaseSequencerBlockHash: []byte("sequencer block hash"), - PrevRollupBlockHash: []byte("prev rollup block hash"), + Payload: &bidv1alpha1.Bid{ + Fee: 100, + Transactions: [][]byte{[]byte("unmarshallable tx")}, + SequencerParentBlockHash: []byte("sequencer block hash"), + RollupParentBlockHash: []byte("prev rollup block hash"), }, }, prevBlockHash: []byte("prev rollup block hash"), @@ -163,7 +163,7 @@ func TestUnmarshallAllocationTxs(t *testing.T) { }, { description: "valid allocation", - allocation: &bundlev1alpha1.Allocation{ + allocation: &bidv1alpha1.Allocation{ Signature: signedAllocation, PublicKey: auctioneerPubKey, Payload: validPayload, @@ -394,11 +394,11 @@ func TestUnbundleRollupData(t *testing.T) { validMarshalledTx5, err := tx5.MarshalBinary() require.NoError(t, err, "failed to marshal valid tx: %v", err) - payload := &bundlev1alpha1.Bundle{ - Fee: 100, - Transactions: [][]byte{validMarshalledTx1, validMarshalledTx2, validMarshalledTx3}, - BaseSequencerBlockHash: baseSequencerBlockHash, - PrevRollupBlockHash: prevRollupBlockHash, + payload := &bidv1alpha1.Bid{ + Fee: 100, + Transactions: [][]byte{validMarshalledTx1, validMarshalledTx2, validMarshalledTx3}, + SequencerParentBlockHash: baseSequencerBlockHash, + RollupParentBlockHash: prevRollupBlockHash, } marshalledPayload, err := proto.Marshal(payload) @@ -409,7 +409,7 @@ func TestUnbundleRollupData(t *testing.T) { }) require.NoError(t, err, "failed to sign payload: %v", err) - allocation := &bundlev1alpha1.Allocation{ + allocation := &bidv1alpha1.Allocation{ Signature: signedPayload, PublicKey: auctioneerPubKey, Payload: payload, @@ -492,11 +492,11 @@ func TestUnbundleRollupDataWithDuplicateAllocations(t *testing.T) { validMarshalledTx5, err := tx5.MarshalBinary() require.NoError(t, err, "failed to marshal valid tx: %v", err) - payload := &bundlev1alpha1.Bundle{ - Fee: 100, - Transactions: [][]byte{validMarshalledTx1, validMarshalledTx2, validMarshalledTx3}, - BaseSequencerBlockHash: baseSequencerBlockHash, - PrevRollupBlockHash: prevRollupBlockHash, + payload := &bidv1alpha1.Bid{ + Fee: 100, + Transactions: [][]byte{validMarshalledTx1, validMarshalledTx2, validMarshalledTx3}, + SequencerParentBlockHash: baseSequencerBlockHash, + 
RollupParentBlockHash: prevRollupBlockHash, } marshalledPayload, err := proto.Marshal(payload) @@ -507,7 +507,7 @@ func TestUnbundleRollupDataWithDuplicateAllocations(t *testing.T) { }) require.NoError(t, err, "failed to sign payload: %v", err) - allocation := &bundlev1alpha1.Allocation{ + allocation := &bidv1alpha1.Allocation{ Signature: signedPayload, PublicKey: auctioneerPubKey, Payload: payload, @@ -607,11 +607,11 @@ func TestUnbundleRollupDataWithDuplicateInvalidAllocations(t *testing.T) { invalidMarshalledTx2, err := invalidTx2.MarshalBinary() require.NoError(t, err, "failed to marshal valid tx: %v", err) - payload := &bundlev1alpha1.Bundle{ - Fee: 100, - Transactions: [][]byte{validMarshalledTx1, validMarshalledTx2, validMarshalledTx3}, - BaseSequencerBlockHash: baseSequencerBlockHash, - PrevRollupBlockHash: prevRollupBlockHash, + payload := &bidv1alpha1.Bid{ + Fee: 100, + Transactions: [][]byte{validMarshalledTx1, validMarshalledTx2, validMarshalledTx3}, + SequencerParentBlockHash: baseSequencerBlockHash, + RollupParentBlockHash: prevRollupBlockHash, } marshalledPayload, err := proto.Marshal(payload) @@ -622,11 +622,11 @@ func TestUnbundleRollupDataWithDuplicateInvalidAllocations(t *testing.T) { }) require.NoError(t, err, "failed to sign allocation: %v", err) - invalidPayload := &bundlev1alpha1.Bundle{ - Fee: 100, - Transactions: [][]byte{invalidMarshalledTx1, invalidMarshalledTx2}, - BaseSequencerBlockHash: baseSequencerBlockHash, - PrevRollupBlockHash: prevRollupBlockHash, + invalidPayload := &bidv1alpha1.Bid{ + Fee: 100, + Transactions: [][]byte{invalidMarshalledTx1, invalidMarshalledTx2}, + SequencerParentBlockHash: baseSequencerBlockHash, + RollupParentBlockHash: prevRollupBlockHash, } marshalledInvalidPayload, err := proto.Marshal(invalidPayload) require.NoError(t, err, "failed to marshal invalid allocation: %v", err) @@ -637,7 +637,7 @@ func TestUnbundleRollupDataWithDuplicateInvalidAllocations(t *testing.T) { }) require.NoError(t, err, "failed to sign allocation: %v", err) - allocation := &bundlev1alpha1.Allocation{ + allocation := &bidv1alpha1.Allocation{ Signature: signedPayload, PublicKey: auctioneerPubKey, Payload: payload, @@ -646,7 +646,7 @@ func TestUnbundleRollupDataWithDuplicateInvalidAllocations(t *testing.T) { marshalledAllocation, err := proto.Marshal(allocation) require.NoError(t, err, "failed to marshal allocation: %v", err) - invalidAllocation := &bundlev1alpha1.Allocation{ + invalidAllocation := &bidv1alpha1.Allocation{ Signature: signedInvalidPayload, // trying to spoof the actual auctioneer key PublicKey: auctioneerPubKey, diff --git a/node/grpcstack.go b/node/grpcstack.go index b3a34c2ca..15000f9ab 100644 --- a/node/grpcstack.go +++ b/node/grpcstack.go @@ -1,7 +1,7 @@ package node import ( - optimisticGrpc "buf.build/gen/go/astria/execution-apis/grpc/go/astria/bundle/v1alpha1/bundlev1alpha1grpc" + optimisticGrpc "buf.build/gen/go/astria/execution-apis/grpc/go/astria/auction/v1alpha1/auctionv1alpha1grpc" "net" "sync" @@ -19,7 +19,7 @@ type GRPCServerHandler struct { execServer *grpc.Server executionServiceServerV1a2 *astriaGrpc.ExecutionServiceServer optimisticExecServ *optimisticGrpc.OptimisticExecutionServiceServer - streamBundleServ *optimisticGrpc.BundleServiceServer + auctionServiceServ *optimisticGrpc.AuctionServiceServer enableAuctioneer bool } @@ -27,7 +27,7 @@ type GRPCServerHandler struct { // NewServer creates a new gRPC server. // It registers the execution service server. 
// It registers the gRPC server with the node so it can be stopped on shutdown. -func NewGRPCServerHandler(node *Node, execServ astriaGrpc.ExecutionServiceServer, optimisticExecServ optimisticGrpc.OptimisticExecutionServiceServer, streamBundleServ optimisticGrpc.BundleServiceServer, cfg *Config) error { +func NewGRPCServerHandler(node *Node, execServ astriaGrpc.ExecutionServiceServer, optimisticExecServ optimisticGrpc.OptimisticExecutionServiceServer, auctionServiceServ optimisticGrpc.AuctionServiceServer, cfg *Config) error { execServer := grpc.NewServer() log.Info("gRPC server enabled", "endpoint", cfg.GRPCEndpoint()) @@ -37,14 +37,14 @@ func NewGRPCServerHandler(node *Node, execServ astriaGrpc.ExecutionServiceServer execServer: execServer, executionServiceServerV1a2: &execServ, optimisticExecServ: &optimisticExecServ, - streamBundleServ: &streamBundleServ, + auctionServiceServ: &auctionServiceServ, enableAuctioneer: cfg.EnableAuctioneer, } astriaGrpc.RegisterExecutionServiceServer(execServer, execServ) if cfg.EnableAuctioneer { optimisticGrpc.RegisterOptimisticExecutionServiceServer(execServer, optimisticExecServ) - optimisticGrpc.RegisterBundleServiceServer(execServer, streamBundleServ) + optimisticGrpc.RegisterAuctionServiceServer(execServer, auctionServiceServ) } node.RegisterGRPCServer(serverHandler) From 1c1a935a988ddecdc2a9be790e86318096364bf1 Mon Sep 17 00:00:00 2001 From: Bharath Date: Mon, 13 Jan 2025 22:42:51 +0530 Subject: [PATCH 79/79] update protobufs --- go.mod | 4 +- go.sum | 4 ++ grpc/shared/validation.go | 20 ++++-- grpc/shared/validation_test.go | 127 +++++++++++++++++++++------------ 4 files changed, 103 insertions(+), 52 deletions(-) diff --git a/go.mod b/go.mod index c9445016e..55ab2532b 100644 --- a/go.mod +++ b/go.mod @@ -3,8 +3,8 @@ module github.com/ethereum/go-ethereum go 1.21 require ( - buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-1f40f333891d.2 - buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.36.2-00000000000000-1f40f333891d.1 + buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-42cbdd5aad4c.2 + buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.36.2-00000000000000-42cbdd5aad4c.1 buf.build/gen/go/astria/primitives/protocolbuffers/go v1.36.2-00000000000000-9a039a6ed8db.1 buf.build/gen/go/astria/sequencerblock-apis/protocolbuffers/go v1.36.2-00000000000000-e54e1c9ad405.1 github.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.2.0 diff --git a/go.sum b/go.sum index 78e748f2d..992acbb76 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,7 @@ buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-1f40f333891d.2 h1:9rMXnvPR2EX56tMIqbhOK+DvqKjWb++p5s1/bookIl8= buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-1f40f333891d.2/go.mod h1:hdCXwnxpMeoqXK5LCQ6gLMcmMLUDX8T9+hbxYrtj+wQ= +buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-42cbdd5aad4c.2 h1:W0lzc0sAzlzyKWWXLcuGW+GDsB9VRT+P/4ffP/hwJ4U= +buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-42cbdd5aad4c.2/go.mod h1:jXiXYlSxLrhrUCAIuLq4cVcfXydbsz9mRVftWx/8eGs= buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-cc31a327d543.1 h1:wOry49zAbse0G4mt2tFTwa4P2AUMuYCR/0mYcPrpcbs= buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-cc31a327d543.1/go.mod h1:+pVCkEpJNp2JtooS8NiydT7bO9+hu11XUZ5Z47DPtXo= buf.build/gen/go/astria/execution-apis/grpc/go v1.5.1-00000000000000-e09c7fd3fe26.1 h1:gS4erruX5XeMN0MZ7xe4JmEIR3uCWrvzG5HGV725WiI= @@ -16,6 +18,8 @@ 
buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.36.1-00000000000000 buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.36.1-00000000000000-1f40f333891d.1/go.mod h1:7azHjtjY3sk38xuZGlf2X6DpAPgQMoeZZMix+JkqsdU= buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.36.2-00000000000000-1f40f333891d.1 h1:cRvRFDg3/KPgEB2+8/orNwCWBhZO0wVZKij4TTKBj9w= buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.36.2-00000000000000-1f40f333891d.1/go.mod h1:oB3M+Fq9RgyUWGMqYk2FqRobQpdH1yZQZ9TYOoc4yIw= +buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.36.2-00000000000000-42cbdd5aad4c.1 h1:GnqNuwC6UjXvtjGscDekiO+/lstY7NWOILlsOMGNpC4= +buf.build/gen/go/astria/execution-apis/protocolbuffers/go v1.36.2-00000000000000-42cbdd5aad4c.1/go.mod h1:oB3M+Fq9RgyUWGMqYk2FqRobQpdH1yZQZ9TYOoc4yIw= buf.build/gen/go/astria/primitives/protocolbuffers/go v1.35.1-20240911152449-eeebd3decdce.1 h1:kG4riHqlF9X6iZ1Oxs5/6ul6aue7MS+A6DK6HAchuTk= buf.build/gen/go/astria/primitives/protocolbuffers/go v1.35.1-20240911152449-eeebd3decdce.1/go.mod h1:n9L7X3VAj4od4VHf2ScJuHARUUQTSxJqtRHZk/7Ptt0= buf.build/gen/go/astria/primitives/protocolbuffers/go v1.35.2-00000000000000-2f2e9ce53f59.1 h1:C1bT0G1In6Z6tBERd1XqwDjdxTK+PatSOJYlVk5Is60= diff --git a/grpc/shared/validation.go b/grpc/shared/validation.go index c90cadee7..90ae13619 100644 --- a/grpc/shared/validation.go +++ b/grpc/shared/validation.go @@ -16,6 +16,8 @@ import ( "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/params" "github.com/golang/protobuf/proto" + proto2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" "math/big" "time" ) @@ -133,11 +135,21 @@ func unmarshallAllocationTxs(allocation *auctionv1alpha1.Allocation, prevBlockHa defer allocationUnbundlingTimer.UpdateSince(unbundlingStart) processedTxs := types.Transactions{} - payload := allocation.GetPayload() + bid := &auctionv1alpha1.Bid{} + + unprocessedBid := allocation.GetBid() + + err := anypb.UnmarshalTo(unprocessedBid, bid, proto2.UnmarshalOptions{ + Merge: false, + AllowPartial: false, + }) + if err != nil { + return nil, WrapError(err, "failed to unmarshal bid") + } log.Debug("Found a potential allocation in the rollup data. Checking if it is valid.", "prevBlockHash", common.BytesToHash(prevBlockHash).String(), "auctioneerBech32Address", auctioneerBech32Address) - if !bytes.Equal(payload.GetRollupParentBlockHash(), prevBlockHash) { + if !bytes.Equal(bid.GetRollupParentBlockHash(), prevBlockHash) { allocationsWithInvalidPrevBlockHash.Inc(1) return nil, errors.New("prev block hash in allocation does not match the previous block hash") } @@ -153,7 +165,7 @@ func unmarshallAllocationTxs(allocation *auctionv1alpha1.Allocation, prevBlockHa return nil, fmt.Errorf("address in allocation does not match auctioneer address. expected: %s, got: %s", auctioneerBech32Address, bech32Address) } - message, err := proto.Marshal(allocation.GetPayload()) + message, err := proto.Marshal(bid) if err != nil { return nil, WrapError(err, "failed to marshal allocation to verify signature") } @@ -166,7 +178,7 @@ func unmarshallAllocationTxs(allocation *auctionv1alpha1.Allocation, prevBlockHa log.Debug("Allocation is valid. 
Unmarshalling the transactions in the bid.") // unmarshall the transactions in the bid - for _, allocationTx := range payload.GetTransactions() { + for _, allocationTx := range bid.GetTransactions() { ethtx := new(types.Transaction) err := ethtx.UnmarshalBinary(allocationTx) if err != nil { diff --git a/grpc/shared/validation_test.go b/grpc/shared/validation_test.go index 3ae96a2e4..3292d5334 100644 --- a/grpc/shared/validation_test.go +++ b/grpc/shared/validation_test.go @@ -1,11 +1,12 @@ package shared import ( - bidv1alpha1 "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/auction/v1alpha1" + auctionv1alpha1 "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/auction/v1alpha1" "bytes" "crypto/ecdsa" "crypto/ed25519" "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/types/known/anypb" "math/big" "testing" @@ -20,6 +21,25 @@ import ( "github.com/stretchr/testify/require" ) +type allocationInfo struct { + signature []byte + publicKey []byte + bid *auctionv1alpha1.Bid +} + +func (a *allocationInfo) convertToAllocation() (*auctionv1alpha1.Allocation, error) { + convertedBid, err := anypb.New(a.bid) + if err != nil { + return nil, err + } + + return &auctionv1alpha1.Allocation{ + Signature: a.signature, + PublicKey: a.publicKey, + Bid: convertedBid, + }, nil +} + func transaction(nonce uint64, gaslimit uint64, key *ecdsa.PrivateKey) *types.Transaction { return pricedTransaction(nonce, gaslimit, big.NewInt(1), key) } @@ -88,14 +108,14 @@ func TestUnmarshallAllocationTxs(t *testing.T) { validMarshalledTx3, err := tx3.MarshalBinary() require.NoError(t, err, "failed to marshal valid tx: %v", err) - validPayload := &bidv1alpha1.Bid{ + validBid := &auctionv1alpha1.Bid{ Fee: 100, Transactions: [][]byte{validMarshalledTx1, validMarshalledTx2, validMarshalledTx3}, SequencerParentBlockHash: []byte("sequencer block hash"), RollupParentBlockHash: []byte("prev rollup block hash"), } - marshalledAllocation, err := proto.Marshal(validPayload) + marshalledAllocation, err := proto.Marshal(validBid) require.NoError(t, err, "failed to marshal payload: %v", err) signedAllocation, err := auctioneerPrivKey.Sign(nil, marshalledAllocation, &ed25519.Options{ @@ -105,8 +125,10 @@ func TestUnmarshallAllocationTxs(t *testing.T) { require.NoError(t, err, "failed to sign allocation: %v", err) tests := []struct { - description string - allocation *bidv1alpha1.Allocation + description string + + allocationInfo allocationInfo + prevBlockHash []byte expectedOutput types.Transactions // just check if error contains the string since error contains other details @@ -114,11 +136,10 @@ func TestUnmarshallAllocationTxs(t *testing.T) { }{ { description: "previous block hash mismatch", - allocation: &bidv1alpha1.Allocation{ - // TODO - add signature and public key validation - Signature: make([]byte, 0), - PublicKey: make([]byte, 0), - Payload: &bidv1alpha1.Bid{ + allocationInfo: allocationInfo{ + signature: make([]byte, 0), + publicKey: make([]byte, 0), + bid: &auctionv1alpha1.Bid{ Fee: 100, Transactions: [][]byte{[]byte("unmarshallable tx")}, SequencerParentBlockHash: []byte("sequencer block hash"), @@ -131,10 +152,10 @@ func TestUnmarshallAllocationTxs(t *testing.T) { }, { description: "public key doesn't match", - allocation: &bidv1alpha1.Allocation{ - Signature: []byte("invalid signature"), - PublicKey: []byte("invalid public key"), - Payload: &bidv1alpha1.Bid{ + allocationInfo: allocationInfo{ + signature: []byte("invalid signature"), + publicKey: []byte("invalid public key"), + bid: 
&auctionv1alpha1.Bid{ Fee: 100, Transactions: [][]byte{[]byte("unmarshallable tx")}, SequencerParentBlockHash: []byte("sequencer block hash"), @@ -147,10 +168,10 @@ func TestUnmarshallAllocationTxs(t *testing.T) { }, { description: "invalid signature", - allocation: &bidv1alpha1.Allocation{ - Signature: []byte("invalid signature"), - PublicKey: auctioneerPubKey, - Payload: &bidv1alpha1.Bid{ + allocationInfo: allocationInfo{ + signature: []byte("invalid signature"), + publicKey: auctioneerPubKey, + bid: &auctionv1alpha1.Bid{ Fee: 100, Transactions: [][]byte{[]byte("unmarshallable tx")}, SequencerParentBlockHash: []byte("sequencer block hash"), @@ -163,10 +184,10 @@ func TestUnmarshallAllocationTxs(t *testing.T) { }, { description: "valid allocation", - allocation: &bidv1alpha1.Allocation{ - Signature: signedAllocation, - PublicKey: auctioneerPubKey, - Payload: validPayload, + allocationInfo: allocationInfo{ + signature: signedAllocation, + publicKey: auctioneerPubKey, + bid: validBid, }, prevBlockHash: []byte("prev rollup block hash"), expectedOutput: types.Transactions{tx1, tx2, tx3}, @@ -176,7 +197,10 @@ func TestUnmarshallAllocationTxs(t *testing.T) { for _, test := range tests { t.Run(test.description, func(t *testing.T) { - finalTxs, err := unmarshallAllocationTxs(test.allocation, test.prevBlockHash, serviceV1Alpha1.AuctioneerAddress(), addressPrefix) + allocation, err := test.allocationInfo.convertToAllocation() + require.NoError(t, err, "failed to convert allocation info to allocation: %v", err) + + finalTxs, err := unmarshallAllocationTxs(allocation, test.prevBlockHash, serviceV1Alpha1.AuctioneerAddress(), addressPrefix) if test.wantErr == "" && err == nil { for _, tx := range test.expectedOutput { foundTx := false @@ -394,25 +418,28 @@ func TestUnbundleRollupData(t *testing.T) { validMarshalledTx5, err := tx5.MarshalBinary() require.NoError(t, err, "failed to marshal valid tx: %v", err) - payload := &bidv1alpha1.Bid{ + bid := &auctionv1alpha1.Bid{ Fee: 100, Transactions: [][]byte{validMarshalledTx1, validMarshalledTx2, validMarshalledTx3}, SequencerParentBlockHash: baseSequencerBlockHash, RollupParentBlockHash: prevRollupBlockHash, } - marshalledPayload, err := proto.Marshal(payload) + marshalledBid, err := proto.Marshal(bid) require.NoError(t, err, "failed to marshal payload: %v", err) - signedPayload, err := auctioneerPrivKey.Sign(nil, marshalledPayload, &ed25519.Options{ + signedBid, err := auctioneerPrivKey.Sign(nil, marshalledBid, &ed25519.Options{ Hash: 0, Context: "", }) require.NoError(t, err, "failed to sign payload: %v", err) - allocation := &bidv1alpha1.Allocation{ - Signature: signedPayload, + // TODO - we need better naming here! 
+ finalBid, err := anypb.New(bid) + + allocation := &auctionv1alpha1.Allocation{ + Signature: signedBid, PublicKey: auctioneerPubKey, - Payload: payload, + Bid: finalBid, } marshalledAllocation, err := proto.Marshal(allocation) @@ -492,25 +519,28 @@ func TestUnbundleRollupDataWithDuplicateAllocations(t *testing.T) { validMarshalledTx5, err := tx5.MarshalBinary() require.NoError(t, err, "failed to marshal valid tx: %v", err) - payload := &bidv1alpha1.Bid{ + bid := &auctionv1alpha1.Bid{ Fee: 100, Transactions: [][]byte{validMarshalledTx1, validMarshalledTx2, validMarshalledTx3}, SequencerParentBlockHash: baseSequencerBlockHash, RollupParentBlockHash: prevRollupBlockHash, } - marshalledPayload, err := proto.Marshal(payload) + marshalledBid, err := proto.Marshal(bid) require.NoError(t, err, "failed to marshal payload: %v", err) - signedPayload, err := auctioneerPrivKey.Sign(nil, marshalledPayload, &ed25519.Options{ + signedPayload, err := auctioneerPrivKey.Sign(nil, marshalledBid, &ed25519.Options{ Hash: 0, Context: "", }) require.NoError(t, err, "failed to sign payload: %v", err) - allocation := &bidv1alpha1.Allocation{ + finalBid, err := anypb.New(bid) + require.NoError(t, err, "failed to convert bid to anypb: %v", err) + + allocation := &auctionv1alpha1.Allocation{ Signature: signedPayload, PublicKey: auctioneerPubKey, - Payload: payload, + Bid: finalBid, } marshalledAllocation, err := proto.Marshal(allocation) @@ -607,50 +637,55 @@ func TestUnbundleRollupDataWithDuplicateInvalidAllocations(t *testing.T) { invalidMarshalledTx2, err := invalidTx2.MarshalBinary() require.NoError(t, err, "failed to marshal valid tx: %v", err) - payload := &bidv1alpha1.Bid{ + bid := &auctionv1alpha1.Bid{ Fee: 100, Transactions: [][]byte{validMarshalledTx1, validMarshalledTx2, validMarshalledTx3}, SequencerParentBlockHash: baseSequencerBlockHash, RollupParentBlockHash: prevRollupBlockHash, } + validBidAny, err := anypb.New(bid) + require.NoError(t, err, "failed to convert bid to anypb: %v", err) - marshalledPayload, err := proto.Marshal(payload) + marshalledBid, err := proto.Marshal(bid) require.NoError(t, err, "failed to marshal allocation: %v", err) - signedPayload, err := auctioneerPrivKey.Sign(nil, marshalledPayload, &ed25519.Options{ + signedBid, err := auctioneerPrivKey.Sign(nil, marshalledBid, &ed25519.Options{ Hash: 0, Context: "", }) require.NoError(t, err, "failed to sign allocation: %v", err) - invalidPayload := &bidv1alpha1.Bid{ + invalidBid := &auctionv1alpha1.Bid{ Fee: 100, Transactions: [][]byte{invalidMarshalledTx1, invalidMarshalledTx2}, SequencerParentBlockHash: baseSequencerBlockHash, RollupParentBlockHash: prevRollupBlockHash, } - marshalledInvalidPayload, err := proto.Marshal(invalidPayload) + invalidBidAny, err := anypb.New(invalidBid) + require.NoError(t, err, "failed to convert bid to anypb: %v", err) + + marshalledInvalidBid, err := proto.Marshal(invalidBid) require.NoError(t, err, "failed to marshal invalid allocation: %v", err) - signedInvalidPayload, err := invalidAuctioneerprivkey.Sign(nil, marshalledInvalidPayload, &ed25519.Options{ + signedInvalidBid, err := invalidAuctioneerprivkey.Sign(nil, marshalledInvalidBid, &ed25519.Options{ Hash: 0, Context: "", }) require.NoError(t, err, "failed to sign allocation: %v", err) - allocation := &bidv1alpha1.Allocation{ - Signature: signedPayload, + allocation := &auctionv1alpha1.Allocation{ + Signature: signedBid, PublicKey: auctioneerPubKey, - Payload: payload, + Bid: validBidAny, } marshalledAllocation, err := proto.Marshal(allocation) 
require.NoError(t, err, "failed to marshal allocation: %v", err) - invalidAllocation := &bidv1alpha1.Allocation{ - Signature: signedInvalidPayload, + invalidAllocation := &auctionv1alpha1.Allocation{ + Signature: signedInvalidBid, // trying to spoof the actual auctioneer key PublicKey: auctioneerPubKey, - Payload: invalidPayload, + Bid: invalidBidAny, } marshalledInvalidAllocation, err := proto.Marshal(invalidAllocation) require.NoError(t, err, "failed to marshal invalid allocation: %v", err)
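Note on the Allocation/Bid change above: the final patch moves the bid payload into a google.protobuf.Any (`Allocation.Bid`), and `unmarshallAllocationTxs` now unpacks that Any with `anypb.UnmarshalTo` before checking the parent block hash, the auctioneer address, and the ed25519 signature over the proto-marshalled Bid. The sketch below is not part of the patch; it is a minimal, hedged illustration of the producer side under those assumptions (generated `auctionv1alpha1` package and the field names shown in the diff; `buildAllocation` is a hypothetical helper name).

    package main

    import (
        "crypto/ed25519"
        "crypto/rand"
        "fmt"

        auctionv1alpha1 "buf.build/gen/go/astria/execution-apis/protocolbuffers/go/astria/auction/v1alpha1"
        "google.golang.org/protobuf/proto"
        "google.golang.org/protobuf/types/known/anypb"
    )

    // buildAllocation (hypothetical helper) wraps a Bid the way the patched
    // unmarshallAllocationTxs expects it: the Bid is packed into an Any, and
    // the ed25519 signature is computed over the raw proto-marshalled Bid
    // bytes, not over the Any wrapper.
    func buildAllocation(bid *auctionv1alpha1.Bid, priv ed25519.PrivateKey) (*auctionv1alpha1.Allocation, error) {
        bidAny, err := anypb.New(bid)
        if err != nil {
            return nil, fmt.Errorf("failed to pack bid into Any: %w", err)
        }

        msg, err := proto.Marshal(bid)
        if err != nil {
            return nil, fmt.Errorf("failed to marshal bid for signing: %w", err)
        }

        return &auctionv1alpha1.Allocation{
            Signature: ed25519.Sign(priv, msg),
            PublicKey: priv.Public().(ed25519.PublicKey),
            Bid:       bidAny,
        }, nil
    }

    func main() {
        _, priv, _ := ed25519.GenerateKey(rand.Reader)

        bid := &auctionv1alpha1.Bid{
            Fee:                      100,
            Transactions:             [][]byte{},
            SequencerParentBlockHash: []byte("sequencer block hash"),
            RollupParentBlockHash:    []byte("prev rollup block hash"),
        }

        allocation, err := buildAllocation(bid, priv)
        if err != nil {
            panic(err)
        }
        fmt.Printf("allocation signature length: %d\n", len(allocation.Signature))
    }

One design caveat worth noting: the verifier re-marshals the unpacked Bid and checks the signature against those bytes, so the scheme relies on Go's protobuf marshalling producing the same bytes the signer produced; protobuf encoding is not formally canonical, although the same Go implementation is deterministic for the same message in practice.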