diff --git a/channeldb/channel.go b/channeldb/channel.go index 6221208a8c3..1fcfff9e4b6 100644 --- a/channeldb/channel.go +++ b/channeldb/channel.go @@ -121,6 +121,11 @@ var ( // broadcasted when moving the channel to state CoopBroadcasted. coopCloseTxKey = []byte("coop-closing-tx-key") + // deliveryScriptKey points to the delivery script to be sent along in + // the Shutdown message. This is committed when moving the channel to + // state ShutdownSent. + deliveryScriptKey = []byte("delivery-script-key") + // commitDiffKey stores the current pending commitment state we've // extended to the remote party (if any). Each time we propose a new // state, we store the information necessary to reconstruct this state @@ -188,6 +193,10 @@ var ( // in the state CommitBroadcasted. ErrNoCloseTx = fmt.Errorf("no closing tx found") + // ErrNoDeliveryScript is returned when no closing delivery script has + // been persisted for a channel. + ErrNoDeliveryScript = fmt.Errorf("no delivery script") + // ErrNoRestoredChannelMutation is returned when a caller attempts to // mutate a channel that's been recovered. ErrNoRestoredChannelMutation = fmt.Errorf("cannot mutate restored " + @@ -597,6 +606,11 @@ var ( // ChanStatusRemoteCloseInitiator indicates that the remote node // initiated closing the channel. ChanStatusRemoteCloseInitiator ChannelStatus = 1 << 6 + + // ChanStatusShutdownSent indicates that a shutdown has been initiated. + // The ChanStatusLocalCloseInitiator and ChanStatusRemoteCloseInitiator + // flags are used to determine which party initiated the shutdown. 
+ ChanStatusShutdownSent ChannelStatus = 1 << 7 ) // chanStatusStrings maps a ChannelStatus to a human friendly string that @@ -607,6 +621,7 @@ var chanStatusStrings = map[ChannelStatus]string{ ChanStatusCommitBroadcasted: "ChanStatusCommitBroadcasted", ChanStatusLocalDataLoss: "ChanStatusLocalDataLoss", ChanStatusRestored: "ChanStatusRestored", + ChanStatusShutdownSent: "ChanStatusShutdownSent", ChanStatusCoopBroadcasted: "ChanStatusCoopBroadcasted", ChanStatusLocalCloseInitiator: "ChanStatusLocalCloseInitiator", ChanStatusRemoteCloseInitiator: "ChanStatusRemoteCloseInitiator", @@ -618,6 +633,7 @@ var orderedChanStatusFlags = []ChannelStatus{ ChanStatusCommitBroadcasted, ChanStatusLocalDataLoss, ChanStatusRestored, + ChanStatusShutdownSent, ChanStatusCoopBroadcasted, ChanStatusLocalCloseInitiator, ChanStatusRemoteCloseInitiator, @@ -1589,6 +1605,83 @@ func (c *OpenChannel) isBorked(chanBucket kvdb.RBucket) (bool, error) { return channel.chanStatus != ChanStatusDefault, nil } +// MarkShutdownSent updates the channel status to indicate that a shutdown has +// been initiated. It also persists the delivery script that we will use in +// the Shutdown message so that we can guarantee that the same delivery script +// is used if a re-connect happens. +func (c *OpenChannel) MarkShutdownSent(localDeliveryScript []byte, + locallyInitiated bool) error { + + return c.markShutdownSent(localDeliveryScript, locallyInitiated) +} + +// markShutdownSent is a helper function which modifies the channel status of +// the receiving channel and inserts a delivery script under the delivery script +// key. It adds a status which indicates the party that initiated the channel +// close. 
+func (c *OpenChannel) markShutdownSent(deliveryScript []byte, + locallyInitiated bool) error { + + c.Lock() + defer c.Unlock() + + putDeliveryScript := func(chanBucket kvdb.RwBucket) error { + return chanBucket.Put(deliveryScriptKey, deliveryScript) + } + + status := ChanStatusShutdownSent + if locallyInitiated { + status |= ChanStatusLocalCloseInitiator + } else { + status |= ChanStatusRemoteCloseInitiator + } + + return c.putChanStatus(status, putDeliveryScript) +} + +// DeliveryScript returns the delivery script to use in the channel's Shutdown +// message. ErrNoDeliveryScript is returned if no such delivery script has been +// persisted yet. +func (c *OpenChannel) DeliveryScript() ([]byte, error) { + var deliveryScript []byte + + err := kvdb.View(c.Db.backend, func(tx kvdb.RTx) error { + chanBucket, err := fetchChanBucket( + tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, + ) + switch { + case err == nil: + case errors.Is(err, ErrNoChanDBExists), + errors.Is(err, ErrNoActiveChannels), + errors.Is(err, ErrChannelNotFound): + + return ErrNoDeliveryScript + default: + return err + } + + delScriptBytes := chanBucket.Get(deliveryScriptKey) + if delScriptBytes == nil { + return ErrNoDeliveryScript + } + + // Make a copy of the returned address bytes here since the + // value returned by Get is not safe to use outside of this + // transaction. + deliveryScript = make([]byte, len(delScriptBytes)) + copy(deliveryScript, delScriptBytes) + + return nil + }, func() { + deliveryScript = nil + }) + if err != nil { + return nil, err + } + + return deliveryScript, nil +} + // MarkCommitmentBroadcasted marks the channel as a commitment transaction has // been broadcast, either our own or the remote, and we should watch the chain // for it to confirm before taking any further action. 
It takes as argument the
diff --git a/channeldb/channel_test.go b/channeldb/channel_test.go
index bfebad824bc..b49ab4a8212 100644
--- a/channeldb/channel_test.go
+++ b/channeldb/channel_test.go
@@ -1158,6 +1158,43 @@ func TestFetchWaitingCloseChannels(t *testing.T) {
 	}
 }
 
+// TestDeliveryScript tests that a channel's delivery script can be set when
+// its status is updated to ChanStatusShutdownSent and that this delivery script
+// can then be retrieved.
+func TestDeliveryScript(t *testing.T) {
+	t.Parallel()
+
+	fullDB, err := MakeTestDB(t)
+	require.NoError(t, err, "unable to make test database")
+
+	cdb := fullDB.ChannelStateDB()
+
+	// First, create a test channel.
+	channel := createTestChannel(t, cdb)
+
+	// We haven't set a delivery script for this channel yet.
+	_, err = channel.DeliveryScript()
+	require.ErrorIs(t, err, ErrNoDeliveryScript)
+
+	// Show that the channel status has not yet been updated.
+	require.False(t, channel.HasChanStatus(ChanStatusShutdownSent))
+	require.False(t, channel.HasChanStatus(ChanStatusLocalCloseInitiator))
+
+	// Construct a new delivery script.
+	script := []byte{1, 3, 4, 5}
+
+	require.NoError(t, channel.MarkShutdownSent(script, true))
+
+	// Assert that the status has been updated accordingly.
+	require.True(t, channel.HasChanStatus(ChanStatusShutdownSent))
+	require.True(t, channel.HasChanStatus(ChanStatusLocalCloseInitiator))
+
+	// Finally, check that the delivery script has been persisted.
+	resScript, err := channel.DeliveryScript()
+	require.NoError(t, err)
+	require.Equal(t, script, resScript)
+}
+
 // TestRefresh asserts that Refresh updates the in-memory state of another
 // OpenChannel to reflect a preceding call to MarkOpen on a different
 // OpenChannel.
diff --git a/docs/release-notes/release-notes-0.18.0.md b/docs/release-notes/release-notes-0.18.0.md
index f95d4ec9e3f..277044b583b 100644
--- a/docs/release-notes/release-notes-0.18.0.md
+++ b/docs/release-notes/release-notes-0.18.0.md
@@ -73,6 +73,11 @@
 a `shutdown` message if there were currently HTLCs on the channel. After this
 change, the shutdown procedure should be compliant with BOLT2 requirements.
 
+* If HTLCs are in-flight at the same time that a `shutdown` is sent and then
+  a re-connect happens before the coop-close is completed, we now [ensure that
+  we re-init the `shutdown`
+  exchange](https://github.com/lightningnetwork/lnd/pull/8447).
+
 * The AMP struct in payment hops will [now be populated](https://github.com/lightningnetwork/lnd/pull/7976)
 when the AMP TLV is set.
 
 * [Add Taproot witness types
diff --git a/htlcswitch/interfaces.go b/htlcswitch/interfaces.go
index 4b3b5d9b415..cd3090a92ad 100644
--- a/htlcswitch/interfaces.go
+++ b/htlcswitch/interfaces.go
@@ -139,7 +139,7 @@ type ChannelUpdateHandler interface {
 	// the state already allowed those adds.
 	EnableAdds(direction LinkDirection) error
 
-	// DiableAdds sets the ChannelUpdateHandler state to allow
+	// DisableAdds sets the ChannelUpdateHandler state to disallow
 	// UpdateAddHtlc's in the specified direction. It returns an error if
 	// the state already disallowed those adds.
 	DisableAdds(direction LinkDirection) error
diff --git a/htlcswitch/link.go b/htlcswitch/link.go
index d8d947952c2..ab67e736b5e 100644
--- a/htlcswitch/link.go
+++ b/htlcswitch/link.go
@@ -636,7 +636,7 @@ func (l *channelLink) EnableAdds(linkDirection LinkDirection) error {
 	return nil
 }
 
-// DiableAdds sets the ChannelUpdateHandler state to allow UpdateAddHtlc's in
+// DisableAdds sets the ChannelUpdateHandler state to disallow UpdateAddHtlc's in
 // the specified direction. It returns an error if the state already disallowed
 // those adds.
func (l *channelLink) DisableAdds(linkDirection LinkDirection) error { diff --git a/itest/lnd_coop_close_with_htlcs_test.go b/itest/lnd_coop_close_with_htlcs_test.go index 3978e119268..3e3c8a692d8 100644 --- a/itest/lnd_coop_close_with_htlcs_test.go +++ b/itest/lnd_coop_close_with_htlcs_test.go @@ -1,24 +1,44 @@ package itest import ( + "testing" + "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lnrpc/invoicesrpc" "github.com/lightningnetwork/lnd/lnrpc/routerrpc" + "github.com/lightningnetwork/lnd/lnrpc/walletrpc" "github.com/lightningnetwork/lnd/lntest" + "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/lntypes" "github.com/stretchr/testify/require" ) -// testCoopCloseWithHtlcs tests whether or not we can successfully issue a coop -// close request whilt there are still active htlcs on the link. Here we will -// set up an HODL invoice to suspend settlement. Then we will attempt to close -// the channel which should appear as a noop for the time being. Then we will -// have the receiver settle the invoice and observe that the channel gets torn -// down after settlement. +// testCoopCloseWithHtlcs tests whether we can successfully issue a coop close +// request while there are still active htlcs on the link. In all the tests, we +// will set up an HODL invoice to suspend settlement. Then we will attempt to +// close the channel which should appear as a noop for the time being. Then we +// will have the receiver settle the invoice and observe that the channel gets +// torn down after settlement. 
func testCoopCloseWithHtlcs(ht *lntest.HarnessTest) { + ht.Run("no restart", func(t *testing.T) { + tt := ht.Subtest(t) + coopCloseWithHTLCs(tt) + }) + + ht.Run("with restart", func(t *testing.T) { + tt := ht.Subtest(t) + coopCloseWithHTLCsWithRestart(tt) + }) +} + +// coopCloseWithHTLCs tests the basic coop close scenario which occurs when one +// channel party initiates a channel shutdown while an HTLC is still pending on +// the channel. +func coopCloseWithHTLCs(ht *lntest.HarnessTest) { alice, bob := ht.Alice, ht.Bob + ht.ConnectNodes(alice, bob) // Here we set up a channel between Alice and Bob, beginning with a // balance on Bob's side. @@ -101,3 +121,133 @@ func testCoopCloseWithHtlcs(ht *lntest.HarnessTest) { // Wait for it to get mined and finish tearing down. ht.AssertStreamChannelCoopClosed(alice, chanPoint, false, closeClient) } + +// coopCloseWithHTLCsWithRestart also tests the coop close flow when an HTLC +// is still pending on the channel but this time it ensures that the shutdown +// process continues as expected even if a channel re-establish happens after +// one party has already initiated the shutdown. +func coopCloseWithHTLCsWithRestart(ht *lntest.HarnessTest) { + alice, bob := ht.Alice, ht.Bob + ht.ConnectNodes(alice, bob) + + // Open a channel between Alice and Bob with the balance split equally. + // We do this to ensure that the close transaction will have 2 outputs + // so that we can assert that the correct delivery address gets used by + // the channel close initiator. + chanPoint := ht.OpenChannel(bob, alice, lntest.OpenChannelParams{ + Amt: btcutil.Amount(1000000), + PushAmt: btcutil.Amount(1000000 / 2), + }) + + // Wait for Bob to understand that the channel is ready to use. + ht.AssertTopologyChannelOpen(bob, chanPoint) + + // Set up a HODL invoice so that we can be sure that an HTLC is pending + // on the channel at the time that shutdown is requested. 
+ var preimage lntypes.Preimage + copy(preimage[:], ht.Random32Bytes()) + payHash := preimage.Hash() + + invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{ + Memo: "testing close", + Value: 400, + Hash: payHash[:], + } + resp := alice.RPC.AddHoldInvoice(invoiceReq) + invoiceStream := alice.RPC.SubscribeSingleInvoice(payHash[:]) + + // Wait for the invoice to be ready and payable. + ht.AssertInvoiceState(invoiceStream, lnrpc.Invoice_OPEN) + + // Now that the invoice is ready to be paid, let's have Bob open an HTLC + // for it. + req := &routerrpc.SendPaymentRequest{ + PaymentRequest: resp.PaymentRequest, + TimeoutSeconds: 60, + FeeLimitSat: 1000000, + } + ht.SendPaymentAndAssertStatus(bob, req, lnrpc.Payment_IN_FLIGHT) + ht.AssertNumActiveHtlcs(bob, 1) + + // Assert at this point that the HTLC is open but not yet settled. + ht.AssertInvoiceState(invoiceStream, lnrpc.Invoice_ACCEPTED) + + // We will now let Alice initiate the closure of the channel. We will + // also let her specify a specific delivery address to be used since we + // want to test that this same address is used in the Shutdown message + // on reconnection. + newAddr := alice.RPC.NewAddress(&lnrpc.NewAddressRequest{ + Type: AddrTypeWitnessPubkeyHash, + }) + + _ = alice.RPC.CloseChannel(&lnrpc.CloseChannelRequest{ + ChannelPoint: chanPoint, + NoWait: true, + DeliveryAddress: newAddr.Address, + }) + + // Assert that both nodes see the channel as waiting for close. + ht.AssertChannelWaitingClose(bob, chanPoint) + ht.AssertChannelWaitingClose(alice, chanPoint) + + // Now restart Alice and Bob. + ht.RestartNode(alice) + ht.RestartNode(bob) + + ht.AssertConnected(alice, bob) + + // Show that both nodes still see the channel as waiting for close after + // the restart. + ht.AssertChannelWaitingClose(bob, chanPoint) + ht.AssertChannelWaitingClose(alice, chanPoint) + + // Settle the invoice. + alice.RPC.SettleInvoice(preimage[:]) + + // Wait for the channel to appear in the waiting closed list. 
+ var closingTxid string + err := wait.Predicate(func() bool { + pendingChansResp := alice.RPC.PendingChannels() + waitingClosed := pendingChansResp.WaitingCloseChannels + + if len(waitingClosed) != 1 { + return false + } + + closingTxid = waitingClosed[0].ClosingTxid + + return true + }, defaultTimeout) + require.NoError(ht, err) + + // Convert the txid we get from the PendingUpdate to a Hash so we can + // wait for it to be mined. + closeTxid, err := chainhash.NewHashFromStr(closingTxid) + require.NoError(ht, err) + + // Wait for the close tx to be in the Mempool and then mine 6 blocks + // to confirm the close. + ht.Miner.AssertTxInMempool(closeTxid) + ht.MineBlocksAndAssertNumTxes(6, 1) + + // Finally, we inspect the closing transaction here to show that the + // delivery address that Alice specified in her original close request + // is the one that ended up being used in the final closing transaction. + tx := alice.RPC.GetTransaction(&walletrpc.GetTransactionRequest{ + Txid: closingTxid, + }) + require.Len(ht, tx.OutputDetails, 2) + + // Find Alice's output in the coop-close transaction. + var outputDetail *lnrpc.OutputDetail + for _, output := range tx.OutputDetails { + if output.IsOurAddress { + outputDetail = output + break + } + } + require.NotNil(ht, outputDetail) + + // Show that the address used is the one she requested. + require.Equal(ht, outputDetail.Address, newAddr.Address) +} diff --git a/lnwallet/chancloser/chancloser.go b/lnwallet/chancloser/chancloser.go index d33bfc4f0b9..758080a2503 100644 --- a/lnwallet/chancloser/chancloser.go +++ b/lnwallet/chancloser/chancloser.go @@ -353,6 +353,17 @@ func (c *ChanCloser) initChanShutdown() (*lnwire.Shutdown, error) { chancloserLog.Infof("ChannelPoint(%v): sending shutdown message", c.chanPoint) + // At this point, we change the channel status to + // ChanStatusShutdownSent and persist the delivery script that we are + // about to send along with the Shutdown message. 
This is to ensure that
+	// if a reconnect happens, we use the same delivery script.
+	err := c.cfg.Channel.MarkShutdownSent(
+		c.localDeliveryScript, c.locallyInitiated,
+	)
+	if err != nil {
+		return nil, err
+	}
+
 	return shutdown, nil
 }
 
diff --git a/lnwallet/chancloser/chancloser_test.go b/lnwallet/chancloser/chancloser_test.go
index 807668a1956..e4360706014 100644
--- a/lnwallet/chancloser/chancloser_test.go
+++ b/lnwallet/chancloser/chancloser_test.go
@@ -154,6 +154,10 @@ func (m *mockChannel) MarkCoopBroadcasted(*wire.MsgTx, bool) error {
 	return nil
 }
 
+func (m *mockChannel) MarkShutdownSent([]byte, bool) error {
+	return nil
+}
+
 func (m *mockChannel) IsInitiator() bool {
 	return m.initiator
 }
diff --git a/lnwallet/chancloser/interface.go b/lnwallet/chancloser/interface.go
index 9d588d521ad..13f5a529225 100644
--- a/lnwallet/chancloser/interface.go
+++ b/lnwallet/chancloser/interface.go
@@ -35,6 +35,10 @@ type Channel interface { //nolint:interfacebloat
 	// transaction has been broadcast.
 	MarkCoopBroadcasted(*wire.MsgTx, bool) error
 
+	// MarkShutdownSent persistently marks that the shutdown of a channel
+	// has been initiated.
+	MarkShutdownSent([]byte, bool) error
+
 	// IsInitiator returns true we are the initiator of the channel.
 	IsInitiator() bool
 
diff --git a/lnwallet/channel.go b/lnwallet/channel.go
index 4d2f798dfcc..e5f9ee7ed97 100644
--- a/lnwallet/channel.go
+++ b/lnwallet/channel.go
@@ -7136,6 +7136,7 @@ func newOutgoingHtlcResolution(signer input.Signer,
 	localDelayTweak := input.SingleTweakBytes(
 		keyRing.CommitPoint, localChanCfg.DelayBasePoint.PubKey,
 	)
+
 	return &OutgoingHtlcResolution{
 		Expiry:          htlc.RefundTimeout,
 		SignedTimeoutTx: timeoutTx,
@@ -8860,6 +8861,19 @@ func (lc *LightningChannel) MarkCoopBroadcasted(tx *wire.MsgTx,
 	return lc.channelState.MarkCoopBroadcasted(tx, localInitiated)
 }
 
+// MarkShutdownSent updates the channel's status to indicate that shutdown has
+// been initiated.
It also persists the delivery address that we intend to use +// during shutdown in order to ensure that the same delivery address is used +// across reconnects. +func (lc *LightningChannel) MarkShutdownSent(deliveryAddr []byte, + localInitiated bool) error { + + lc.Lock() + defer lc.Unlock() + + return lc.channelState.MarkShutdownSent(deliveryAddr, localInitiated) +} + // MarkDataLoss marks sets the channel status to LocalDataLoss and stores the // passed commitPoint for use to retrieve funds in case the remote force closes // the channel. diff --git a/peer/brontide.go b/peer/brontide.go index cdcbab3b348..e474e83487e 100644 --- a/peer/brontide.go +++ b/peer/brontide.go @@ -903,10 +903,14 @@ func (p *Brontide) loadActiveChannels(chans []*channeldb.OpenChannel) ( // Check if this channel needs to have the cooperative // close process restarted. If so, we'll need to send // the Shutdown message that is returned. - if dbChan.HasChanStatus( + sentShutdown := dbChan.HasChanStatus( + channeldb.ChanStatusShutdownSent, + ) + coopCloseBroadcast := dbChan.HasChanStatus( channeldb.ChanStatusCoopBroadcasted, - ) { + ) + if sentShutdown || coopCloseBroadcast { shutdownMsg, err := p.restartCoopClose(lnChan) if err != nil { p.log.Errorf("Unable to restart "+ @@ -2760,16 +2764,16 @@ func chooseDeliveryScript(upfront, return requested, nil } - // If an upfront shutdown script was provided, and the user did not request - // a custom shutdown script, return the upfront address. + // If an upfront shutdown script was provided, and the user did not + // request a custom shutdown script, return the upfront address. if len(requested) == 0 { return upfront, nil } // If both an upfront shutdown script and a custom close script were // provided, error if the user provided shutdown script does not match - // the upfront shutdown script (because closing out to a different script - // would violate upfront shutdown). 
+ // the upfront shutdown script (because closing out to a different + // script would violate upfront shutdown). if !bytes.Equal(upfront, requested) { return nil, chancloser.ErrUpfrontShutdownScriptMismatch } @@ -2787,10 +2791,7 @@ func (p *Brontide) restartCoopClose(lnChan *lnwallet.LightningChannel) ( // If this channel has status ChanStatusCoopBroadcasted and does not // have a closing transaction, then the cooperative close process was // started but never finished. We'll re-create the chanCloser state - // machine and resend Shutdown. BOLT#2 requires that we retransmit - // Shutdown exactly, but doing so would mean persisting the RPC - // provided close script. Instead use the LocalUpfrontShutdownScript - // or generate a script. + // machine and resend Shutdown. c := lnChan.State() _, err := c.BroadcastedCooperative() if err != nil && err != channeldb.ErrNoCloseTx { @@ -2802,15 +2803,28 @@ func (p *Brontide) restartCoopClose(lnChan *lnwallet.LightningChannel) ( return nil, nil } - // As mentioned above, we don't re-create the delivery script. - deliveryScript := c.LocalShutdownScript - if len(deliveryScript) == 0 { - var err error - deliveryScript, err = p.genDeliveryScript() - if err != nil { - p.log.Errorf("unable to gen delivery script: %v", - err) - return nil, fmt.Errorf("close addr unavailable") + // If the channel has status ChanStatusShutdownSent, then we have + // persisted the delivery script that we should use in the Shutdown + // message. + deliveryScript, err := c.DeliveryScript() + switch { + // An error other than ErrNoDeliveryScript was encountered. + case err != nil && !errors.Is(err, channeldb.ErrNoDeliveryScript): + return nil, err + + // No delivery script was found. This means that we have not yet sent + // shutdown, and so we choose an appropriate script here. 
+ case errors.Is(err, channeldb.ErrNoDeliveryScript): + deliveryScript = c.LocalShutdownScript + if len(deliveryScript) == 0 { + var err error + deliveryScript, err = p.genDeliveryScript() + if err != nil { + p.log.Errorf("unable to gen delivery script: "+ + "%v", err) + + return nil, fmt.Errorf("close addr unavailable") + } } }