Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
93 changes: 93 additions & 0 deletions channeldb/channel.go
Original file line number Diff line number Diff line change
Expand Up @@ -121,6 +121,11 @@ var (
// broadcasted when moving the channel to state CoopBroadcasted.
coopCloseTxKey = []byte("coop-closing-tx-key")

// deliveryScriptKey points to the delivery script to be sent along in
// the Shutdown message. This is committed when moving the channel to
// state ShutdownSent.
deliveryScriptKey = []byte("delivery-script-key")

// commitDiffKey stores the current pending commitment state we've
// extended to the remote party (if any). Each time we propose a new
// state, we store the information necessary to reconstruct this state
Expand Down Expand Up @@ -188,6 +193,10 @@ var (
// in the state CommitBroadcasted.
ErrNoCloseTx = fmt.Errorf("no closing tx found")

// ErrNoDeliveryScript is returned when no closing delivery script has
// been persisted for a channel.
ErrNoDeliveryScript = fmt.Errorf("no delivery script")

// ErrNoRestoredChannelMutation is returned when a caller attempts to
// mutate a channel that's been recovered.
ErrNoRestoredChannelMutation = fmt.Errorf("cannot mutate restored " +
Expand Down Expand Up @@ -597,6 +606,11 @@ var (
// ChanStatusRemoteCloseInitiator indicates that the remote node
// initiated closing the channel.
ChanStatusRemoteCloseInitiator ChannelStatus = 1 << 6

// ChanStatusShutdownSent indicates that a shutdown has been initiated.
// The ChanStatusLocalCloseInitiator and ChanStatusRemoteCloseInitiator
// flags are used to determine which party initiated the shutdown.
ChanStatusShutdownSent ChannelStatus = 1 << 7
)

// chanStatusStrings maps a ChannelStatus to a human friendly string that
Expand All @@ -607,6 +621,7 @@ var chanStatusStrings = map[ChannelStatus]string{
ChanStatusCommitBroadcasted: "ChanStatusCommitBroadcasted",
ChanStatusLocalDataLoss: "ChanStatusLocalDataLoss",
ChanStatusRestored: "ChanStatusRestored",
ChanStatusShutdownSent: "ChanStatusShutdownSent",
ChanStatusCoopBroadcasted: "ChanStatusCoopBroadcasted",
ChanStatusLocalCloseInitiator: "ChanStatusLocalCloseInitiator",
ChanStatusRemoteCloseInitiator: "ChanStatusRemoteCloseInitiator",
Expand All @@ -618,6 +633,7 @@ var orderedChanStatusFlags = []ChannelStatus{
ChanStatusCommitBroadcasted,
ChanStatusLocalDataLoss,
ChanStatusRestored,
ChanStatusShutdownSent,
ChanStatusCoopBroadcasted,
ChanStatusLocalCloseInitiator,
ChanStatusRemoteCloseInitiator,
Expand Down Expand Up @@ -1589,6 +1605,83 @@ func (c *OpenChannel) isBorked(chanBucket kvdb.RBucket) (bool, error) {
return channel.chanStatus != ChanStatusDefault, nil
}

// MarkShutdownSent records that a Shutdown message has been initiated for
// this channel. The given delivery script is persisted alongside the status
// change so that the exact same script is guaranteed to be used in the
// Shutdown message should a re-connect happen before the close completes.
func (c *OpenChannel) MarkShutdownSent(deliveryScript []byte,
	locallyInitiated bool) error {

	return c.markShutdownSent(deliveryScript, locallyInitiated)
}

// markShutdownSent is a helper function which modifies the channel status of
// the receiving channel and inserts a delivery script under the delivery script
// key. It adds a status which indicates the party that initiated the channel
// close.
func (c *OpenChannel) markShutdownSent(deliveryScript []byte,
locallyInitiated bool) error {

c.Lock()
defer c.Unlock()

putDeliveryScript := func(chanBucket kvdb.RwBucket) error {
return chanBucket.Put(deliveryScriptKey, deliveryScript)
}

status := ChanStatusShutdownSent
if locallyInitiated {
status |= ChanStatusLocalCloseInitiator
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We can't add another channel status here because on restart, execution will hit this if statement and the link won't be loaded:

lnd/peer/brontide.go

Lines 881 to 882 in cf4f468

if !dbChan.HasChanStatus(channeldb.ChanStatusDefault) &&
!dbChan.HasChanStatus(channeldb.ChanStatusRestored) {

The reason the itest works is that coop close then proceeds via restartCoopClose, but when the coop close transaction is being made it will just delete the HTLC and burn it to fees. This can be seen if you log the coop close transaction and proposedFee here:

lnd/lnwallet/channel.go

Lines 7821 to 7825 in cf4f468

closeTx := CreateCooperativeCloseTx(
fundingTxIn(lc.channelState), lc.channelState.LocalChanCfg.DustLimit,
lc.channelState.RemoteChanCfg.DustLimit, ourBalance, theirBalance,
localDeliveryScript, remoteDeliveryScript, closeTxOpts...,
)

It can also be observed because no UpdateFulfillHTLC is sent during the itest.

Instead, we should just be able to use the existence of a delivery script to determine whether or not we need to continue coop close

Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

suggestion: I think we should adjust the checks that you're referencing @Crypt-iQ as opposed to refraining from updating statuses according to the events that transpire.

Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We'd still need the checks for older nodes, so this would be adding a special case and duplicating some logic. Ultimately, loading the link with a non-ChanStatusDefault state means refactoring the way the channel status is used, because the link performs the isBorked check before trying to update anything. The way the check is written currently means it would fail for ChanStatusShutdownSent. It's certainly doable, but I don't think changing it is worth the risk

Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The issue is that it isn't default though. We're definitely in a different state that isn't normal operation and can't accept new HTLC adds. Can you clarify what "the risk" is? I'm trying to weigh the cost of having increasingly fragmented logic (where consequences of changing stuff leaks like it is doing now) vs the cost of fixing things so the code straightforwardly reflects what is supposed to happen.

Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

There's no issue with using ChanStatusDefault here, the link just needs to be aware that it should send shutdown. The risk is that we make a costly mistake when changing how the status field is used

Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think there's a way to navigate this. For one, it's not really functioning as a "status" as it's a bit-vector. Instead it's a collection of flags. So really what the ChanStatusDefault check is doing is trying to check if specific flags are unset, and act accordingly. I think perhaps we should invert the check: rather than asking "is it default", we should ask "does it have any one of these conditions". We can always push it to an extra field, but I do think that the way we handle ChannelStatus right now is very fragile and should be reworked to be more structurally correct. Maybe an issue for a less tactical PR though.

} else {
status |= ChanStatusRemoteCloseInitiator
}

return c.putChanStatus(status, putDeliveryScript)
}

// DeliveryScript returns the delivery script to use in the channel's Shutdown
// message. ErrNoDeliveryScript is returned if no such delivery script has been
// persisted yet.
func (c *OpenChannel) DeliveryScript() ([]byte, error) {
	var script []byte

	viewErr := kvdb.View(c.Db.backend, func(tx kvdb.RTx) error {
		chanBucket, err := fetchChanBucket(
			tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
		)
		if err != nil {
			// A missing database or bucket simply means that no
			// delivery script was ever stored, so translate those
			// cases into ErrNoDeliveryScript.
			if errors.Is(err, ErrNoChanDBExists) ||
				errors.Is(err, ErrNoActiveChannels) ||
				errors.Is(err, ErrChannelNotFound) {

				return ErrNoDeliveryScript
			}

			return err
		}

		rawScript := chanBucket.Get(deliveryScriptKey)
		if rawScript == nil {
			return ErrNoDeliveryScript
		}

		// The byte slice returned by Get is only valid for the
		// lifetime of the transaction, so copy it out before
		// returning.
		script = append([]byte(nil), rawScript...)

		return nil
	}, func() {
		script = nil
	})
	if viewErr != nil {
		return nil, viewErr
	}

	return script, nil
}

// MarkCommitmentBroadcasted marks the channel as a commitment transaction has
// been broadcast, either our own or the remote, and we should watch the chain
// for it to confirm before taking any further action. It takes as argument the
Expand Down
37 changes: 37 additions & 0 deletions channeldb/channel_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1158,6 +1158,43 @@ func TestFetchWaitingCloseChannels(t *testing.T) {
}
}

// TestDeliveryScript tests that a channel's delivery script can be set when
// its status is updated to ChanStatusShutdownSent and that this delivery
// script can then be retrieved.
func TestDeliveryScript(t *testing.T) {
	t.Parallel()

	fullDB, err := MakeTestDB(t)
	require.NoError(t, err, "unable to make test database")

	cdb := fullDB.ChannelStateDB()

	// First, create a test channel.
	channel := createTestChannel(t, cdb)

	// We haven't set a delivery script for this channel yet, so fetching
	// one must fail with ErrNoDeliveryScript. Note that require.Error's
	// trailing arguments are only a failure message — ErrorIs is needed to
	// actually assert on the returned error value.
	_, err = channel.DeliveryScript()
	require.ErrorIs(t, err, ErrNoDeliveryScript)

	// Show that the channel status has not yet been updated.
	require.False(t, channel.HasChanStatus(ChanStatusShutdownSent))
	require.False(t, channel.HasChanStatus(ChanStatusLocalCloseInitiator))

	// Construct a new delivery script and persist it by marking the
	// shutdown as sent, locally initiated.
	script := []byte{1, 3, 4, 5}
	require.NoError(t, channel.MarkShutdownSent(script, true))

	// Assert that the status has been updated accordingly.
	require.True(t, channel.HasChanStatus(ChanStatusShutdownSent))
	require.True(t, channel.HasChanStatus(ChanStatusLocalCloseInitiator))

	// Finally, check that the delivery script has been persisted.
	resScript, err := channel.DeliveryScript()
	require.NoError(t, err)
	require.Equal(t, script, resScript)
}

// TestRefresh asserts that Refresh updates the in-memory state of another
// OpenChannel to reflect a preceding call to MarkOpen on a different
// OpenChannel.
Expand Down
5 changes: 5 additions & 0 deletions docs/release-notes/release-notes-0.18.0.md
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,11 @@
a `shutdown` message if there were currently HTLCs on the channel. After this
change, the shutdown procedure should be compliant with BOLT2 requirements.

* If HTLCs are in-flight at the same time that a `shutdown` is sent, and a
  re-connect then happens before the coop-close is completed, we now [ensure
  that we re-initiate the `shutdown`
  exchange](https://github.com/lightningnetwork/lnd/pull/8447).

* The AMP struct in payment hops will [now be populated](https://github.com/lightningnetwork/lnd/pull/7976) when the AMP TLV is set.

* [Add Taproot witness types
Expand Down
2 changes: 1 addition & 1 deletion htlcswitch/interfaces.go
Original file line number Diff line number Diff line change
Expand Up @@ -139,7 +139,7 @@ type ChannelUpdateHandler interface {
// the state already allowed those adds.
EnableAdds(direction LinkDirection) error

// DiableAdds sets the ChannelUpdateHandler state to allow
// DisableAdds sets the ChannelUpdateHandler state to disallow
// UpdateAddHtlc's in the specified direction. It returns an error if
// the state already disallowed those adds.
DisableAdds(direction LinkDirection) error
Expand Down
2 changes: 1 addition & 1 deletion htlcswitch/link.go
Original file line number Diff line number Diff line change
Expand Up @@ -636,7 +636,7 @@ func (l *channelLink) EnableAdds(linkDirection LinkDirection) error {
return nil
}

// DiableAdds sets the ChannelUpdateHandler state to allow UpdateAddHtlc's in
// DisableAdds sets the ChannelUpdateHandler state to disallow UpdateAddHtlc's in
// the specified direction. It returns an error if the state already disallowed
// those adds.
func (l *channelLink) DisableAdds(linkDirection LinkDirection) error {
Expand Down
162 changes: 156 additions & 6 deletions itest/lnd_coop_close_with_htlcs_test.go
Original file line number Diff line number Diff line change
@@ -1,24 +1,44 @@
package itest

import (
"testing"

"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lnrpc/invoicesrpc"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
"github.com/lightningnetwork/lnd/lnrpc/walletrpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/lightningnetwork/lnd/lntypes"
"github.com/stretchr/testify/require"
)

// testCoopCloseWithHtlcs tests whether or not we can successfully issue a coop
// close request while there are still active htlcs on the link. Here we will
// set up an HODL invoice to suspend settlement. Then we will attempt to close
// the channel which should appear as a noop for the time being. Then we will
// have the receiver settle the invoice and observe that the channel gets torn
// down after settlement.
// testCoopCloseWithHtlcs tests whether we can successfully issue a coop close
// request while there are still active htlcs on the link. In all the tests, we
// will set up an HODL invoice to suspend settlement. Then we will attempt to
// close the channel which should appear as a noop for the time being. Then we
// will have the receiver settle the invoice and observe that the channel gets
// torn down after settlement.
func testCoopCloseWithHtlcs(ht *lntest.HarnessTest) {
	testCases := []struct {
		name string
		run  func(ht *lntest.HarnessTest)
	}{
		{name: "no restart", run: coopCloseWithHTLCs},
		{name: "with restart", run: coopCloseWithHTLCsWithRestart},
	}

	for _, tc := range testCases {
		tc := tc
		ht.Run(tc.name, func(t *testing.T) {
			tc.run(ht.Subtest(t))
		})
	}
}

// coopCloseWithHTLCs tests the basic coop close scenario which occurs when one
// channel party initiates a channel shutdown while an HTLC is still pending on
// the channel.
func coopCloseWithHTLCs(ht *lntest.HarnessTest) {
alice, bob := ht.Alice, ht.Bob
ht.ConnectNodes(alice, bob)

// Here we set up a channel between Alice and Bob, beginning with a
// balance on Bob's side.
Expand Down Expand Up @@ -101,3 +121,133 @@ func testCoopCloseWithHtlcs(ht *lntest.HarnessTest) {
// Wait for it to get mined and finish tearing down.
ht.AssertStreamChannelCoopClosed(alice, chanPoint, false, closeClient)
}

// coopCloseWithHTLCsWithRestart also tests the coop close flow when an HTLC
// is still pending on the channel but this time it ensures that the shutdown
// process continues as expected even if a channel re-establish happens after
// one party has already initiated the shutdown.
func coopCloseWithHTLCsWithRestart(ht *lntest.HarnessTest) {
	alice, bob := ht.Alice, ht.Bob
	ht.ConnectNodes(alice, bob)

	// Open a channel between Alice and Bob with the balance split equally.
	// We do this to ensure that the close transaction will have 2 outputs
	// so that we can assert that the correct delivery address gets used by
	// the channel close initiator.
	chanPoint := ht.OpenChannel(bob, alice, lntest.OpenChannelParams{
		Amt:     btcutil.Amount(1000000),
		PushAmt: btcutil.Amount(1000000 / 2),
	})

	// Wait for Bob to understand that the channel is ready to use.
	ht.AssertTopologyChannelOpen(bob, chanPoint)

	// Set up a HODL invoice so that we can be sure that an HTLC is pending
	// on the channel at the time that shutdown is requested.
	var preimage lntypes.Preimage
	copy(preimage[:], ht.Random32Bytes())
	payHash := preimage.Hash()

	invoiceReq := &invoicesrpc.AddHoldInvoiceRequest{
		Memo:  "testing close",
		Value: 400,
		Hash:  payHash[:],
	}
	resp := alice.RPC.AddHoldInvoice(invoiceReq)
	invoiceStream := alice.RPC.SubscribeSingleInvoice(payHash[:])

	// Wait for the invoice to be ready and payable.
	ht.AssertInvoiceState(invoiceStream, lnrpc.Invoice_OPEN)

	// Now that the invoice is ready to be paid, let's have Bob open an HTLC
	// for it.
	req := &routerrpc.SendPaymentRequest{
		PaymentRequest: resp.PaymentRequest,
		TimeoutSeconds: 60,
		FeeLimitSat:    1000000,
	}
	ht.SendPaymentAndAssertStatus(bob, req, lnrpc.Payment_IN_FLIGHT)
	ht.AssertNumActiveHtlcs(bob, 1)

	// Assert at this point that the HTLC is open but not yet settled.
	ht.AssertInvoiceState(invoiceStream, lnrpc.Invoice_ACCEPTED)

	// We will now let Alice initiate the closure of the channel. We will
	// also let her specify a specific delivery address to be used since we
	// want to test that this same address is used in the Shutdown message
	// on reconnection.
	newAddr := alice.RPC.NewAddress(&lnrpc.NewAddressRequest{
		Type: AddrTypeWitnessPubkeyHash,
	})

	// NoWait is set since the close cannot complete while the HTLC is
	// still active; the call returns once the shutdown is initiated.
	_ = alice.RPC.CloseChannel(&lnrpc.CloseChannelRequest{
		ChannelPoint:    chanPoint,
		NoWait:          true,
		DeliveryAddress: newAddr.Address,
	})

	// Assert that both nodes see the channel as waiting for close.
	ht.AssertChannelWaitingClose(bob, chanPoint)
	ht.AssertChannelWaitingClose(alice, chanPoint)

	// Now restart Alice and Bob. This is the crux of the test: the
	// shutdown must be re-initiated on re-establish.
	ht.RestartNode(alice)
	ht.RestartNode(bob)

	ht.AssertConnected(alice, bob)

	// Show that both nodes still see the channel as waiting for close after
	// the restart.
	ht.AssertChannelWaitingClose(bob, chanPoint)
	ht.AssertChannelWaitingClose(alice, chanPoint)

	// Settle the invoice. With no more pending HTLCs, the coop close can
	// now proceed.
	alice.RPC.SettleInvoice(preimage[:])

	// Wait for the channel to appear in the waiting closed list.
	var closingTxid string
	err := wait.Predicate(func() bool {
		pendingChansResp := alice.RPC.PendingChannels()
		waitingClosed := pendingChansResp.WaitingCloseChannels

		if len(waitingClosed) != 1 {
			return false
		}

		closingTxid = waitingClosed[0].ClosingTxid

		return true
	}, defaultTimeout)
	require.NoError(ht, err)

	// Convert the txid we get from the PendingUpdate to a Hash so we can
	// wait for it to be mined.
	closeTxid, err := chainhash.NewHashFromStr(closingTxid)
	require.NoError(ht, err)

	// Wait for the close tx to be in the Mempool and then mine 6 blocks
	// to confirm the close.
	ht.Miner.AssertTxInMempool(closeTxid)
	ht.MineBlocksAndAssertNumTxes(6, 1)

	// Finally, we inspect the closing transaction here to show that the
	// delivery address that Alice specified in her original close request
	// is the one that ended up being used in the final closing transaction.
	tx := alice.RPC.GetTransaction(&walletrpc.GetTransactionRequest{
		Txid: closingTxid,
	})
	require.Len(ht, tx.OutputDetails, 2)

	// Find Alice's output in the coop-close transaction.
	var outputDetail *lnrpc.OutputDetail
	for _, output := range tx.OutputDetails {
		if output.IsOurAddress {
			outputDetail = output
			break
		}
	}
	require.NotNil(ht, outputDetail)

	// Show that the address used is the one she requested.
	require.Equal(ht, outputDetail.Address, newAddr.Address)
}
Loading