Skip to content

Commit 29454c2

Browse files
committed
itest: add new coop close rbf itest
This ensures that during the RBF process, if one close transaction confirms, a re-org then occurs, and a different close transaction confirms afterwards, we'll properly detect this case.
1 parent 2666c71 commit 29454c2

File tree

3 files changed

+195
-2
lines changed

3 files changed

+195
-2
lines changed

itest/list_on_test.go

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -727,6 +727,10 @@ var allTestCases = []*lntest.TestCase{
727727
Name: "rbf coop close disconnect",
728728
TestFunc: testRBFCoopCloseDisconnect,
729729
},
730+
{
731+
Name: "coop close rbf with reorg",
732+
TestFunc: testCoopCloseRBFWithReorg,
733+
},
730734
{
731735
Name: "bump fee low budget",
732736
TestFunc: testBumpFeeLowBudget,

itest/lnd_coop_close_rbf_test.go

Lines changed: 175 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,13 @@
11
package itest
22

33
import (
4+
"fmt"
5+
46
"github.com/btcsuite/btcd/btcutil"
7+
"github.com/btcsuite/btcd/chaincfg/chainhash"
8+
"github.com/lightningnetwork/lnd/lnrpc"
59
"github.com/lightningnetwork/lnd/lntest"
10+
"github.com/lightningnetwork/lnd/lntest/wait"
611
"github.com/lightningnetwork/lnd/lnwallet/chainfee"
712
"github.com/stretchr/testify/require"
813
)
@@ -153,3 +158,173 @@ func testRBFCoopCloseDisconnect(ht *lntest.HarnessTest) {
153158
// Disconnect Bob from Alice.
154159
ht.DisconnectNodes(alice, bob)
155160
}
161+
162+
// testCoopCloseRBFWithReorg tests that when a cooperative close transaction
163+
// is reorganized out during confirmation waiting, the system properly handles
164+
// RBF replacements and re-registration for any spend of the funding output.
165+
func testCoopCloseRBFWithReorg(ht *lntest.HarnessTest) {
166+
// Skip this test for neutrino backend as we can't trigger reorgs.
167+
if ht.IsNeutrinoBackend() {
168+
ht.Skipf("skipping reorg test for neutrino backend")
169+
}
170+
171+
// Force cooperative close to require 3 confirmations for predictable
172+
// testing.
173+
const requiredConfs = 3
174+
rbfCoopFlags := []string{
175+
"--protocol.rbf-coop-close",
176+
"--dev.force-channel-close-confs=3",
177+
}
178+
179+
// Set the fee estimate to 1sat/vbyte to ensure our RBF attempts work.
180+
ht.SetFeeEstimate(250)
181+
ht.SetFeeEstimateWithConf(250, 6)
182+
183+
// Create two nodes with enough coins for a 50/50 channel.
184+
cfgs := [][]string{rbfCoopFlags, rbfCoopFlags}
185+
params := lntest.OpenChannelParams{
186+
Amt: btcutil.Amount(10_000_000),
187+
PushAmt: btcutil.Amount(5_000_000),
188+
}
189+
chanPoints, nodes := ht.CreateSimpleNetwork(cfgs, params)
190+
alice, bob := nodes[0], nodes[1]
191+
chanPoint := chanPoints[0]
192+
193+
// Initiate cooperative close with initial fee rate of 5 sat/vb.
194+
initialFeeRate := chainfee.SatPerVByte(5)
195+
_, aliceCloseUpdate := ht.CloseChannelAssertPending(
196+
alice, chanPoint, false,
197+
lntest.WithCoopCloseFeeRate(initialFeeRate),
198+
lntest.WithLocalTxNotify(),
199+
)
200+
201+
// Verify the initial close transaction is at the expected fee rate.
202+
alicePendingUpdate := aliceCloseUpdate.GetClosePending()
203+
require.NotNil(ht, aliceCloseUpdate)
204+
require.Equal(
205+
ht, int64(initialFeeRate), alicePendingUpdate.FeePerVbyte,
206+
)
207+
208+
// Capture the initial close transaction from the mempool.
209+
initialCloseTxid, err := chainhash.NewHash(alicePendingUpdate.Txid)
210+
require.NoError(ht, err)
211+
initialCloseTx := ht.AssertTxInMempool(*initialCloseTxid)
212+
213+
// Create first RBF replacement before any mining.
214+
firstRbfFeeRate := chainfee.SatPerVByte(10)
215+
_, firstRbfUpdate := ht.CloseChannelAssertPending(
216+
bob, chanPoint, false,
217+
lntest.WithCoopCloseFeeRate(firstRbfFeeRate),
218+
lntest.WithLocalTxNotify(),
219+
)
220+
221+
// Capture the first RBF transaction.
222+
firstRbfTxid, err := chainhash.NewHash(firstRbfUpdate.GetClosePending().Txid)
223+
require.NoError(ht, err)
224+
firstRbfTx := ht.AssertTxInMempool(*firstRbfTxid)
225+
226+
_, bestHeight, err := ht.Miner().Client.GetBestBlock()
227+
require.NoError(ht, err)
228+
229+
ht.Logf("Current block height: %d", bestHeight)
230+
231+
// Mine n-1 blocks (2 blocks when requiring 3 confirmations) with the
232+
// first RBF transaction. This is just shy of full confirmation.
233+
block1 := ht.Miner().MineBlockWithTxes(
234+
[]*btcutil.Tx{btcutil.NewTx(firstRbfTx)},
235+
)
236+
237+
ht.Logf("Mined block %d with first RBF tx", bestHeight+1)
238+
239+
block2 := ht.MineEmptyBlocks(1)[0]
240+
241+
ht.Logf("Mined block %d", bestHeight+2)
242+
243+
ht.Logf("Re-orging two blocks to remove first RBF tx")
244+
245+
// Trigger a reorganization that removes the last 2 blocks. This is safe
246+
// because we haven't reached full confirmation yet.
247+
bestBlockHash := block2.Header.BlockHash()
248+
require.NoError(
249+
ht, ht.Miner().Client.InvalidateBlock(&bestBlockHash),
250+
)
251+
bestBlockHash = block1.Header.BlockHash()
252+
require.NoError(
253+
ht, ht.Miner().Client.InvalidateBlock(&bestBlockHash),
254+
)
255+
256+
_, bestHeight, err = ht.Miner().Client.GetBestBlock()
257+
require.NoError(ht, err)
258+
ht.Logf("Re-orged to block height: %d", bestHeight)
259+
260+
ht.Log("Mining blocks to surpass previous chain")
261+
262+
// Mine 2 empty blocks to trigger the reorg on the nodes.
263+
ht.MineEmptyBlocks(2)
264+
265+
_, bestHeight, err = ht.Miner().Client.GetBestBlock()
266+
require.NoError(ht, err)
267+
ht.Logf("Mined blocks to reach height: %d", bestHeight)
268+
269+
// Now, instead of mining the second RBF, mine the INITIAL transaction
270+
// to test that the system can handle any valid spend of the funding
271+
// output.
272+
block := ht.Miner().MineBlockWithTxes(
273+
[]*btcutil.Tx{btcutil.NewTx(initialCloseTx)},
274+
)
275+
ht.AssertTxInBlock(block, *initialCloseTxid)
276+
277+
// Mine additional blocks to reach the required confirmations (3 total).
278+
ht.MineEmptyBlocks(requiredConfs - 1)
279+
280+
// Both parties should see that the channel is now fully closed on chain
281+
// with the expected closing txid.
282+
expectedClosingTxid := initialCloseTxid.String()
283+
err = wait.NoError(func() error {
284+
req := &lnrpc.ClosedChannelsRequest{}
285+
aliceClosedChans := alice.RPC.ClosedChannels(req)
286+
bobClosedChans := bob.RPC.ClosedChannels(req)
287+
if len(aliceClosedChans.Channels) != 1 {
288+
return fmt.Errorf("alice: expected 1 closed chan, got %d",
289+
len(aliceClosedChans.Channels))
290+
}
291+
if len(bobClosedChans.Channels) != 1 {
292+
return fmt.Errorf("bob: expected 1 closed chan, got %d",
293+
len(bobClosedChans.Channels))
294+
}
295+
296+
// Verify both Alice and Bob have the expected closing txid.
297+
aliceClosedChan := aliceClosedChans.Channels[0]
298+
if aliceClosedChan.ClosingTxHash != expectedClosingTxid {
299+
return fmt.Errorf("alice: expected closing txid %s, "+
300+
"got %s",
301+
expectedClosingTxid,
302+
aliceClosedChan.ClosingTxHash)
303+
}
304+
if aliceClosedChan.CloseType !=
305+
lnrpc.ChannelCloseSummary_COOPERATIVE_CLOSE {
306+
return fmt.Errorf("alice: expected cooperative "+
307+
"close, got %v",
308+
aliceClosedChan.CloseType)
309+
}
310+
311+
bobClosedChan := bobClosedChans.Channels[0]
312+
if bobClosedChan.ClosingTxHash != expectedClosingTxid {
313+
return fmt.Errorf("bob: expected closing txid %s, "+
314+
"got %s",
315+
expectedClosingTxid,
316+
bobClosedChan.ClosingTxHash)
317+
}
318+
if bobClosedChan.CloseType !=
319+
lnrpc.ChannelCloseSummary_COOPERATIVE_CLOSE {
320+
return fmt.Errorf("bob: expected cooperative "+
321+
"close, got %v",
322+
bobClosedChan.CloseType)
323+
}
324+
325+
return nil
326+
}, defaultTimeout)
327+
require.NoError(ht, err)
328+
329+
ht.Logf("Successfully verified closing txid: %s", expectedClosingTxid)
330+
}

itest/lnd_funding_test.go

Lines changed: 16 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1272,8 +1272,17 @@ func testChannelFundingWithUnstableUtxos(ht *lntest.HarnessTest) {
12721272
// Make sure Carol sees her to_remote output from the force close tx.
12731273
ht.AssertNumPendingSweeps(carol, 1)
12741274

1275-
// We need to wait for carol initiating the sweep of the to_remote
1276-
// output of chanPoint2.
1275+
// Wait for Carol's sweep transaction to appear in the mempool. Due to
1276+
// async confirmation notifications, there's a race between when the
1277+
// sweep is registered and when the sweeper processes the next block.
1278+
// The sweeper uses immediate=false, so it broadcasts on the next block
1279+
// after registration. Mine an empty block to trigger the broadcast.
1280+
ht.MineEmptyBlocks(1)
1281+
1282+
// Now the sweep should be in the mempool.
1283+
ht.AssertNumTxsInMempool(1)
1284+
1285+
// Now we should see the unconfirmed UTXO from the sweep.
12771286
utxo := ht.AssertNumUTXOsUnconfirmed(carol, 1)[0]
12781287

12791288
// We now try to open channel using the unconfirmed utxo.
@@ -1329,6 +1338,11 @@ func testChannelFundingWithUnstableUtxos(ht *lntest.HarnessTest) {
13291338
// Make sure Carol sees her to_remote output from the force close tx.
13301339
ht.AssertNumPendingSweeps(carol, 1)
13311340

1341+
// Mine an empty block to trigger the sweep broadcast (same fix as
1342+
// above).
1343+
ht.MineEmptyBlocks(1)
1344+
ht.AssertNumTxsInMempool(1)
1345+
13321346
// Wait for the to_remote sweep tx to show up in carol's wallet.
13331347
ht.AssertNumUTXOsUnconfirmed(carol, 1)
13341348

0 commit comments

Comments
 (0)