diff --git a/eclair-core/src/main/scala/fr/acinq/eclair/channel/Helpers.scala b/eclair-core/src/main/scala/fr/acinq/eclair/channel/Helpers.scala index 958cd6d49f..67ed2193d6 100644 --- a/eclair-core/src/main/scala/fr/acinq/eclair/channel/Helpers.scala +++ b/eclair-core/src/main/scala/fr/acinq/eclair/channel/Helpers.scala @@ -925,42 +925,48 @@ object Helpers { } /** - * Claim the output of a local commit tx corresponding to HTLCs. + * Claim the outputs of a local commit tx corresponding to HTLCs. */ def claimHtlcOutputs(keyManager: ChannelKeyManager, commitment: FullCommitment)(implicit log: LoggingAdapter): Map[OutPoint, Option[HtlcTx]] = { val channelKeyPath = keyManager.keyPath(commitment.localParams, commitment.params.channelConfig) val localPerCommitmentPoint = keyManager.commitmentPoint(channelKeyPath, commitment.localCommit.index.toInt) - // those are the preimages to existing received htlcs + // We collect all the preimages we wanted to reveal to our peer. val hash2Preimage: Map[ByteVector32, ByteVector32] = commitment.changes.localChanges.all.collect { case u: UpdateFulfillHtlc => u.paymentPreimage }.map(r => Crypto.sha256(r) -> r).toMap + // We collect incoming HTLCs that we started failing but didn't cross-sign. val failedIncomingHtlcs: Set[Long] = commitment.changes.localChanges.all.collect { case u: UpdateFailHtlc => u.id case u: UpdateFailMalformedHtlc => u.id }.toSet - // these htlcs have been signed by our peer, but we haven't received their revocation and relayed them yet + // We collect incoming HTLCs that we haven't relayed: they may have been signed by our peer, but we haven't + // received their revocation yet. 
val nonRelayedIncomingHtlcs: Set[Long] = commitment.changes.remoteChanges.all.collect { case add: UpdateAddHtlc => add.id }.toSet commitment.localCommit.htlcTxsAndRemoteSigs.collect { case HtlcTxAndRemoteSig(txInfo@HtlcSuccessTx(_, _, paymentHash, _, _), remoteSig) => if (hash2Preimage.contains(paymentHash)) { - // incoming htlc for which we have the preimage: we can spend it immediately + // We immediately spend incoming htlcs for which we have the preimage. Some(txInfo.input.outPoint -> withTxGenerationLog("htlc-success") { val localSig = keyManager.sign(txInfo, keyManager.htlcPoint(channelKeyPath), localPerCommitmentPoint, TxOwner.Local, commitment.params.commitmentFormat) Right(Transactions.addSigs(txInfo, localSig, remoteSig, hash2Preimage(paymentHash), commitment.params.commitmentFormat)) }) } else if (failedIncomingHtlcs.contains(txInfo.htlcId)) { - // incoming htlc that we know for sure will never be fulfilled downstream: we can safely discard it + // We can ignore incoming htlcs that we started failing: our peer will claim them after the timeout. + // We don't track those outputs because we want to move to the CLOSED state even if our peer never claims them. None } else if (nonRelayedIncomingHtlcs.contains(txInfo.htlcId)) { - // incoming htlc that we haven't relayed yet: we can safely discard it, our peer will claim it once it times out + // Similarly, we can also ignore incoming htlcs that we haven't relayed, because we can't receive the preimage. None } else { - // incoming htlc for which we don't have the preimage: we can't spend it immediately, but we may learn the - // preimage later, otherwise it will eventually timeout and they will get their funds back + // For all other incoming htlcs, we may receive the preimage later from downstream. We thus want to track + // the corresponding outputs to ensure we don't move to the CLOSED state until they've been spent, either + // by us if we receive the preimage, or by our peer after the timeout. 
Some(txInfo.input.outPoint -> None) } case HtlcTxAndRemoteSig(txInfo: HtlcTimeoutTx, remoteSig) => - // outgoing htlc: they may or may not have the preimage, the only thing to do is try to get back our funds after timeout + // We track all outputs that belong to outgoing htlcs. Our peer may or may not have the preimage: if they + // claim the output, we will learn the preimage from their transaction, otherwise we will get our funds + // back after the timeout. Some(txInfo.input.outPoint -> withTxGenerationLog("htlc-timeout") { val localSig = keyManager.sign(txInfo, keyManager.htlcPoint(channelKeyPath), localPerCommitmentPoint, TxOwner.Local, commitment.params.commitmentFormat) Right(Transactions.addSigs(txInfo, localSig, remoteSig, commitment.params.commitmentFormat)) @@ -975,20 +981,22 @@ object Helpers { * doing that because it introduces a lot of subtle edge cases. */ def claimHtlcDelayedOutput(localCommitPublished: LocalCommitPublished, keyManager: ChannelKeyManager, commitment: FullCommitment, tx: Transaction, feerates: FeeratesPerKw, onChainFeeConf: OnChainFeeConf, finalScriptPubKey: ByteVector)(implicit log: LoggingAdapter): (LocalCommitPublished, Option[HtlcDelayedTx]) = { - if (isHtlcSuccess(tx, localCommitPublished) || isHtlcTimeout(tx, localCommitPublished)) { + if (tx.txIn.exists(txIn => localCommitPublished.htlcTxs.contains(txIn.outPoint))) { val feeratePerKwDelayed = onChainFeeConf.getClosingFeerate(feerates) val channelKeyPath = keyManager.keyPath(commitment.localParams, commitment.params.channelConfig) val localPerCommitmentPoint = keyManager.commitmentPoint(channelKeyPath, commitment.localCommit.index.toInt) val localRevocationPubkey = Generators.revocationPubKey(commitment.remoteParams.revocationBasepoint, localPerCommitmentPoint) val localDelayedPubkey = Generators.derivePubKey(keyManager.delayedPaymentPoint(channelKeyPath).publicKey, localPerCommitmentPoint) - val htlcDelayedTx = withTxGenerationLog("htlc-delayed") { + val htlcDelayedTx_opt = 
withTxGenerationLog("htlc-delayed") { + // Note that this will return None if the transaction wasn't one of our HTLC transactions, which may happen + // if our peer was able to claim the HTLC output before us (race condition between success and timeout). Transactions.makeHtlcDelayedTx(tx, commitment.localParams.dustLimit, localRevocationPubkey, commitment.remoteParams.toSelfDelay, localDelayedPubkey, finalScriptPubKey, feeratePerKwDelayed).map(claimDelayed => { val sig = keyManager.sign(claimDelayed, keyManager.delayedPaymentPoint(channelKeyPath), localPerCommitmentPoint, TxOwner.Local, commitment.params.commitmentFormat) Transactions.addSigs(claimDelayed, sig) }) } - val localCommitPublished1 = localCommitPublished.copy(claimHtlcDelayedTxs = localCommitPublished.claimHtlcDelayedTxs ++ htlcDelayedTx.toSeq) - (localCommitPublished1, htlcDelayedTx) + val localCommitPublished1 = localCommitPublished.copy(claimHtlcDelayedTxs = localCommitPublished.claimHtlcDelayedTxs ++ htlcDelayedTx_opt.toSeq) + (localCommitPublished1, htlcDelayedTx_opt) } else { (localCommitPublished, None) } @@ -1080,7 +1088,7 @@ object Helpers { } /** - * Claim our htlc outputs only + * Claim our htlc outputs only from the remote commitment. 
*/ def claimHtlcOutputs(keyManager: ChannelKeyManager, commitment: FullCommitment, remoteCommit: RemoteCommit, feerates: FeeratesPerKw, finalScriptPubKey: ByteVector)(implicit log: LoggingAdapter): Map[OutPoint, Option[ClaimHtlcTx]] = { val (remoteCommitTx, _) = Commitment.makeRemoteTxs(keyManager, commitment.params.channelConfig, commitment.params.channelFeatures, remoteCommit.index, commitment.localParams, commitment.remoteParams, commitment.fundingTxIndex, commitment.remoteFundingPubKey, commitment.commitInput, remoteCommit.remotePerCommitmentPoint, remoteCommit.spec) @@ -1094,41 +1102,52 @@ object Helpers { val localPaymentBasepoint = commitment.localParams.walletStaticPaymentBasepoint.getOrElse(keyManager.paymentPoint(channelKeyPath).publicKey) val localPaymentPubkey = if (commitment.params.channelFeatures.hasFeature(Features.StaticRemoteKey)) localPaymentBasepoint else Generators.derivePubKey(localPaymentBasepoint, remoteCommit.remotePerCommitmentPoint) val outputs = makeCommitTxOutputs(!commitment.localParams.paysCommitTxFees, commitment.remoteParams.dustLimit, remoteRevocationPubkey, commitment.localParams.toSelfDelay, remoteDelayedPaymentPubkey, localPaymentPubkey, remoteHtlcPubkey, localHtlcPubkey, commitment.remoteFundingPubKey, localFundingPubkey, remoteCommit.spec, commitment.params.commitmentFormat) - // we need to use a rather high fee for htlc-claim because we compete with the counterparty + // We need to use a rather high fee for htlc-claim because we compete with the counterparty. val feeratePerKwHtlc = feerates.fast - // those are the preimages to existing received htlcs + // We collect all the preimages we wanted to reveal to our peer. val hash2Preimage: Map[ByteVector32, ByteVector32] = commitment.changes.localChanges.all.collect { case u: UpdateFulfillHtlc => u.paymentPreimage }.map(r => Crypto.sha256(r) -> r).toMap + // We collect incoming HTLCs that we started failing but didn't cross-sign. 
val failedIncomingHtlcs: Set[Long] = commitment.changes.localChanges.all.collect { case u: UpdateFailHtlc => u.id case u: UpdateFailMalformedHtlc => u.id }.toSet + // We collect incoming HTLCs that we haven't relayed: they may have been signed by our peer, but our peer hasn't + // sent their revocation yet. + val nonRelayedIncomingHtlcs: Set[Long] = commitment.changes.remoteChanges.all.collect { case add: UpdateAddHtlc => add.id }.toSet - // remember we are looking at the remote commitment so IN for them is really OUT for us and vice versa + // Remember we are looking at the remote commitment so IN for them is really OUT for us and vice versa. remoteCommit.spec.htlcs.collect { case OutgoingHtlc(add: UpdateAddHtlc) => - // NB: we first generate the tx skeleton and finalize it below if we have the preimage, so we set logSuccess to false to avoid logging twice + // NB: we first generate the tx skeleton and finalize it below if we have the preimage, so we set logSuccess to false to avoid logging twice. withTxGenerationLog("claim-htlc-success", logSuccess = false) { Transactions.makeClaimHtlcSuccessTx(remoteCommitTx.tx, outputs, commitment.localParams.dustLimit, localHtlcPubkey, remoteHtlcPubkey, remoteRevocationPubkey, finalScriptPubKey, add, feeratePerKwHtlc, commitment.params.commitmentFormat) }.map(claimHtlcTx => { if (hash2Preimage.contains(add.paymentHash)) { - // incoming htlc for which we have the preimage: we can spend it immediately + // We immediately spend incoming htlcs for which we have the preimage. 
Some(claimHtlcTx.input.outPoint -> withTxGenerationLog("claim-htlc-success") { val sig = keyManager.sign(claimHtlcTx, keyManager.htlcPoint(channelKeyPath), remoteCommit.remotePerCommitmentPoint, TxOwner.Local, commitment.params.commitmentFormat) Right(Transactions.addSigs(claimHtlcTx, sig, hash2Preimage(add.paymentHash))) }) } else if (failedIncomingHtlcs.contains(add.id)) { - // incoming htlc that we know for sure will never be fulfilled downstream: we can safely discard it + // We can ignore incoming htlcs that we started failing: our peer will claim them after the timeout. + // We don't track those outputs because we want to move to the CLOSED state even if our peer never claims them. + None + } else if (nonRelayedIncomingHtlcs.contains(add.id)) { + // Similarly, we can also ignore incoming htlcs that we haven't relayed, because we can't receive the preimage. None } else { - // incoming htlc for which we don't have the preimage: we can't spend it immediately, but we may learn the - // preimage later, otherwise it will eventually timeout and they will get their funds back + // For all other incoming htlcs, we may receive the preimage later from downstream. We thus want to track + // the corresponding outputs to ensure we don't move to the CLOSED state until they've been spent, either + // by us if we receive the preimage, or by our peer after the timeout. Some(claimHtlcTx.input.outPoint -> None) } }) case IncomingHtlc(add: UpdateAddHtlc) => - // outgoing htlc: they may or may not have the preimage, the only thing to do is try to get back our funds after timeout - // NB: we first generate the tx skeleton and finalize it below, so we set logSuccess to false to avoid logging twice + // We track all outputs that belong to outgoing htlcs. Our peer may or may not have the preimage: if they + // claim the output, we will learn the preimage from their transaction, otherwise we will get our funds + // back after the timeout. 
+ // NB: we first generate the tx skeleton and finalize it below, so we set logSuccess to false to avoid logging twice. withTxGenerationLog("claim-htlc-timeout", logSuccess = false) { Transactions.makeClaimHtlcTimeoutTx(remoteCommitTx.tx, outputs, commitment.localParams.dustLimit, localHtlcPubkey, remoteHtlcPubkey, remoteRevocationPubkey, finalScriptPubKey, add, feeratePerKwHtlc, commitment.params.commitmentFormat) }.map(claimHtlcTx => { @@ -1267,36 +1286,37 @@ object Helpers { * lockTime (thanks to the use of sighash_single | sighash_anyonecanpay), so we may need to claim multiple outputs. */ def claimHtlcTxOutputs(keyManager: ChannelKeyManager, params: ChannelParams, remotePerCommitmentSecrets: ShaChain, revokedCommitPublished: RevokedCommitPublished, htlcTx: Transaction, feerates: FeeratesPerKw, finalScriptPubKey: ByteVector)(implicit log: LoggingAdapter): (RevokedCommitPublished, Seq[ClaimHtlcDelayedOutputPenaltyTx]) = { - val isHtlcTx = htlcTx.txIn.map(_.outPoint.txid).contains(revokedCommitPublished.commitTx.txid) && - htlcTx.txIn.map(_.witness).collect(Scripts.extractPreimageFromHtlcSuccess.orElse(Scripts.extractPaymentHashFromHtlcTimeout)).nonEmpty - if (isHtlcTx) { - log.info(s"looks like txid=${htlcTx.txid} could be a 2nd level htlc tx spending revoked commit txid=${revokedCommitPublished.commitTx.txid}") - // Let's assume that htlcTx is an HtlcSuccessTx or HtlcTimeoutTx and try to generate a tx spending its output using a revocation key + // We published HTLC-penalty transactions for every HTLC output: this transaction may be ours, or it may be one + // of their HTLC transactions that confirmed before our HTLC-penalty transaction. If it is spending an HTLC + // output, we assume that it's an HTLC transaction published by our peer and try to create penalty transactions + // that spend it, which will automatically be skipped if this was instead one of our HTLC-penalty transactions. 
+ val htlcOutputs = revokedCommitPublished.htlcPenaltyTxs.map(_.input.outPoint).toSet + val spendsHtlcOutput = htlcTx.txIn.exists(txIn => htlcOutputs.contains(txIn.outPoint)) + if (spendsHtlcOutput) { import params._ val commitTx = revokedCommitPublished.commitTx val obscuredTxNumber = Transactions.decodeTxNumber(commitTx.txIn.head.sequence, commitTx.lockTime) val channelKeyPath = keyManager.keyPath(localParams, channelConfig) val localPaymentPoint = localParams.walletStaticPaymentBasepoint.getOrElse(keyManager.paymentPoint(channelKeyPath).publicKey) - // this tx has been published by remote, so we need to invert local/remote params + // If this tx has been published by the remote, we need to invert local/remote params. val txNumber = Transactions.obscuredCommitTxNumber(obscuredTxNumber, !localParams.isChannelOpener, remoteParams.paymentBasepoint, localPaymentPoint) - // now we know what commit number this tx is referring to, we can derive the commitment point from the shachain + // Now we know what commit number this tx is referring to, we can derive the commitment point from the shachain. remotePerCommitmentSecrets.getHash(0xFFFFFFFFFFFFL - txNumber) .map(d => PrivateKey(d)) .map(remotePerCommitmentSecret => { val remotePerCommitmentPoint = remotePerCommitmentSecret.publicKey val remoteDelayedPaymentPubkey = Generators.derivePubKey(remoteParams.delayedPaymentBasepoint, remotePerCommitmentPoint) val remoteRevocationPubkey = Generators.revocationPubKey(keyManager.revocationPoint(channelKeyPath).publicKey, remotePerCommitmentPoint) - - // we need to use a high fee here for punishment txs because after a delay they can be spent by the counterparty + // We need to use a high fee when spending HTLC txs because after a delay they can also be spent by the counterparty. 
val feeratePerKwPenalty = feerates.fastest - val penaltyTxs = Transactions.makeClaimHtlcDelayedOutputPenaltyTxs(htlcTx, localParams.dustLimit, remoteRevocationPubkey, localParams.toSelfDelay, remoteDelayedPaymentPubkey, finalScriptPubKey, feeratePerKwPenalty).flatMap(claimHtlcDelayedOutputPenaltyTx => { withTxGenerationLog("htlc-delayed-penalty") { claimHtlcDelayedOutputPenaltyTx.map(htlcDelayedPenalty => { val sig = keyManager.sign(htlcDelayedPenalty, keyManager.revocationPoint(channelKeyPath), remotePerCommitmentSecret, TxOwner.Local, commitmentFormat) val signedTx = Transactions.addSigs(htlcDelayedPenalty, sig) - // we need to make sure that the tx is indeed valid + // We need to make sure that the tx is indeed valid. Transaction.correctlySpends(signedTx.tx, Seq(htlcTx), ScriptFlags.STANDARD_SCRIPT_VERIFY_FLAGS) + log.warning("txId={} is a 2nd level htlc tx spending revoked commit txId={}: publishing htlc-penalty txId={}", htlcTx.txid, revokedCommitPublished.commitTx.txid, signedTx.tx.txid) signedTx }) } @@ -1322,11 +1342,11 @@ object Helpers { * - preimage needs to be sent to the upstream channel */ def extractPreimages(commitment: FullCommitment, tx: Transaction)(implicit log: LoggingAdapter): Set[(UpdateAddHtlc, ByteVector32)] = { - val htlcSuccess = tx.txIn.map(_.witness).collect(Scripts.extractPreimageFromHtlcSuccess) - htlcSuccess.foreach(r => log.info(s"extracted paymentPreimage=$r from tx=$tx (htlc-success)")) - val claimHtlcSuccess = tx.txIn.map(_.witness).collect(Scripts.extractPreimageFromClaimHtlcSuccess) - claimHtlcSuccess.foreach(r => log.info(s"extracted paymentPreimage=$r from tx=$tx (claim-htlc-success)")) - val paymentPreimages = (htlcSuccess ++ claimHtlcSuccess).toSet + val htlcSuccess = Scripts.extractPreimagesFromHtlcSuccess(tx) + htlcSuccess.foreach(r => log.info("extracted paymentPreimage={} from tx={} (htlc-success)", r, tx)) + val claimHtlcSuccess = Scripts.extractPreimagesFromClaimHtlcSuccess(tx) + claimHtlcSuccess.foreach(r => 
log.info("extracted paymentPreimage={} from tx={} (claim-htlc-success)", r, tx)) + val paymentPreimages = htlcSuccess ++ claimHtlcSuccess paymentPreimages.flatMap { paymentPreimage => val paymentHash = sha256(paymentPreimage) // We only care about outgoing HTLCs when we're trying to learn a preimage to relay upstream. @@ -1345,34 +1365,6 @@ object Helpers { } } - def isHtlcTimeout(tx: Transaction, localCommitPublished: LocalCommitPublished): Boolean = { - tx.txIn.filter(txIn => localCommitPublished.htlcTxs.get(txIn.outPoint) match { - case Some(Some(_: HtlcTimeoutTx)) => true - case _ => false - }).map(_.witness).collect(Scripts.extractPaymentHashFromHtlcTimeout).nonEmpty - } - - def isHtlcSuccess(tx: Transaction, localCommitPublished: LocalCommitPublished): Boolean = { - tx.txIn.filter(txIn => localCommitPublished.htlcTxs.get(txIn.outPoint) match { - case Some(Some(_: HtlcSuccessTx)) => true - case _ => false - }).map(_.witness).collect(Scripts.extractPreimageFromHtlcSuccess).nonEmpty - } - - def isClaimHtlcTimeout(tx: Transaction, remoteCommitPublished: RemoteCommitPublished): Boolean = { - tx.txIn.filter(txIn => remoteCommitPublished.claimHtlcTxs.get(txIn.outPoint) match { - case Some(Some(_: ClaimHtlcTimeoutTx)) => true - case _ => false - }).map(_.witness).collect(Scripts.extractPaymentHashFromClaimHtlcTimeout).nonEmpty - } - - def isClaimHtlcSuccess(tx: Transaction, remoteCommitPublished: RemoteCommitPublished): Boolean = { - tx.txIn.filter(txIn => remoteCommitPublished.claimHtlcTxs.get(txIn.outPoint) match { - case Some(Some(_: ClaimHtlcSuccessTx)) => true - case _ => false - }).map(_.witness).collect(Scripts.extractPreimageFromClaimHtlcSuccess).nonEmpty - } - /** * In CLOSING state, when we are notified that a transaction has been confirmed, we analyze it to find out if one or * more htlcs have timed out and need to be failed in an upstream channel. 
Trimmed htlcs can be failed as soon as @@ -1384,18 +1376,20 @@ object Helpers { def trimmedOrTimedOutHtlcs(commitmentFormat: CommitmentFormat, localCommit: LocalCommit, localCommitPublished: LocalCommitPublished, localDustLimit: Satoshi, tx: Transaction)(implicit log: LoggingAdapter): Set[UpdateAddHtlc] = { val untrimmedHtlcs = Transactions.trimOfferedHtlcs(localDustLimit, localCommit.spec, commitmentFormat).map(_.add) if (tx.txid == localCommit.commitTxAndRemoteSig.commitTx.tx.txid) { - // the tx is a commitment tx, we can immediately fail all dust htlcs (they don't have an output in the tx) + // The commitment tx is confirmed: we can immediately fail all dust htlcs (they don't have an output in the tx). localCommit.spec.htlcs.collect(outgoing) -- untrimmedHtlcs } else { - // maybe this is a timeout tx, in that case we can resolve and fail the corresponding htlc + // Maybe this is a timeout tx: in that case we can resolve and fail the corresponding htlc. tx.txIn.flatMap(txIn => localCommitPublished.htlcTxs.get(txIn.outPoint) match { - case Some(Some(HtlcTimeoutTx(_, _, htlcId, _))) if isHtlcTimeout(tx, localCommitPublished) => + // This may also be our peer claiming the HTLC by revealing the preimage: in that case we have already + // extracted the preimage with [[extractPreimages]] and relayed it upstream. 
+ case Some(Some(HtlcTimeoutTx(_, _, htlcId, _))) if Scripts.extractPreimagesFromClaimHtlcSuccess(tx).isEmpty => untrimmedHtlcs.find(_.id == htlcId) match { case Some(htlc) => - log.info(s"htlc-timeout tx for htlc #$htlcId paymentHash=${htlc.paymentHash} expiry=${tx.lockTime} has been confirmed (tx=$tx)") + log.info("htlc-timeout tx for htlc #{} paymentHash={} expiry={} has been confirmed (tx={})", htlcId, htlc.paymentHash, tx.lockTime, tx) Some(htlc) case None => - log.error(s"could not find htlc #$htlcId for htlc-timeout tx=$tx") + log.error("could not find htlc #{} for htlc-timeout tx={}", htlcId, tx) None } case _ => None @@ -1414,18 +1408,20 @@ object Helpers { def trimmedOrTimedOutHtlcs(commitmentFormat: CommitmentFormat, remoteCommit: RemoteCommit, remoteCommitPublished: RemoteCommitPublished, remoteDustLimit: Satoshi, tx: Transaction)(implicit log: LoggingAdapter): Set[UpdateAddHtlc] = { val untrimmedHtlcs = Transactions.trimReceivedHtlcs(remoteDustLimit, remoteCommit.spec, commitmentFormat).map(_.add) if (tx.txid == remoteCommit.txid) { - // the tx is a commitment tx, we can immediately fail all dust htlcs (they don't have an output in the tx) + // The commitment tx is confirmed: we can immediately fail all dust htlcs (they don't have an output in the tx). remoteCommit.spec.htlcs.collect(incoming) -- untrimmedHtlcs } else { - // maybe this is a timeout tx, in that case we can resolve and fail the corresponding htlc + // Maybe this is a timeout tx: in that case we can resolve and fail the corresponding htlc. tx.txIn.flatMap(txIn => remoteCommitPublished.claimHtlcTxs.get(txIn.outPoint) match { - case Some(Some(ClaimHtlcTimeoutTx(_, _, htlcId, _))) if isClaimHtlcTimeout(tx, remoteCommitPublished) => + // This may also be our peer claiming the HTLC by revealing the preimage: in that case we have already + // extracted the preimage with [[extractPreimages]] and relayed it upstream. 
+ case Some(Some(ClaimHtlcTimeoutTx(_, _, htlcId, _))) if Scripts.extractPreimagesFromHtlcSuccess(tx).isEmpty => untrimmedHtlcs.find(_.id == htlcId) match { case Some(htlc) => - log.info(s"claim-htlc-timeout tx for htlc #$htlcId paymentHash=${htlc.paymentHash} expiry=${tx.lockTime} has been confirmed (tx=$tx)") + log.info("claim-htlc-timeout tx for htlc #{} paymentHash={} expiry={} has been confirmed (tx={})", htlcId, htlc.paymentHash, tx.lockTime, tx) Some(htlc) case None => - log.error(s"could not find htlc #$htlcId for claim-htlc-timeout tx=$tx") + log.error("could not find htlc #{} for claim-htlc-timeout tx={}", htlcId, tx) None } case _ => None @@ -1460,52 +1456,30 @@ object Helpers { val localCommit = d.commitments.latest.localCommit val remoteCommit = d.commitments.latest.remoteCommit val nextRemoteCommit_opt = d.commitments.latest.nextRemoteCommit_opt.map(_.commit) + // NB: from the p.o.v of remote, their incoming htlcs are our outgoing htlcs. + val outgoingHtlcs = localCommit.spec.htlcs.collect(outgoing) ++ (remoteCommit.spec.htlcs ++ nextRemoteCommit_opt.map(_.spec.htlcs).getOrElse(Set.empty)).collect(incoming) if (localCommit.commitTxAndRemoteSig.commitTx.tx.txid == tx.txid) { - // our commit got confirmed, so any htlc that is in their commitment but not in ours will never reach the chain - val htlcsInRemoteCommit = remoteCommit.spec.htlcs ++ nextRemoteCommit_opt.map(_.spec.htlcs).getOrElse(Set.empty) - // NB: from the p.o.v of remote, their incoming htlcs are our outgoing htlcs - htlcsInRemoteCommit.collect(incoming) -- localCommit.spec.htlcs.collect(outgoing) + // Our commit got confirmed: any htlc that is *not* in our commit will never reach the chain. 
+ outgoingHtlcs -- localCommit.spec.htlcs.collect(outgoing) } else if (d.revokedCommitPublished.map(_.commitTx.txid).contains(tx.txid)) { - // a revoked commitment got confirmed: we will claim its outputs, but we also need to fail htlcs that are pending in the latest commitment: - // - outgoing htlcs that are in the local commitment but not in remote/nextRemote have already been fulfilled/failed so we don't care about them - // - outgoing htlcs that are in the remote/nextRemote commitment may not really be overridden, but since we are going to claim their output as a - // punishment we will never get the preimage and may as well consider them failed in the context of relaying htlcs - nextRemoteCommit_opt.getOrElse(remoteCommit).spec.htlcs.collect(incoming) + // A revoked commitment got confirmed: we will claim its outputs, but we also need to resolve upstream htlcs. + // We consider *all* outgoing htlcs failed: our peer may reveal the preimage with an HTLC-success transaction, + // but it's more likely that our penalty transaction will confirm first. In any case, since we will get those + // funds back on-chain, it's as if the outgoing htlc had failed, therefore it doesn't hurt to be failed back + // upstream. In the best case scenario, we already fulfilled upstream, then the fail will be a no-op and we + // will pocket the htlc amount. 
+ outgoingHtlcs } else if (remoteCommit.txid == tx.txid) { - // their commit got confirmed - nextRemoteCommit_opt match { - case Some(nextRemoteCommit) => - // we had signed a new commitment but they committed the previous one - // any htlc that we signed in the new commitment that they didn't sign will never reach the chain - nextRemoteCommit.spec.htlcs.collect(incoming) -- localCommit.spec.htlcs.collect(outgoing) - case None => - // their last commitment got confirmed, so no htlcs will be overridden, they will timeout or be fulfilled on chain - Set.empty - } + // Their current commit got confirmed: any htlc that is *not* in their current commit will never reach the chain. + outgoingHtlcs -- remoteCommit.spec.htlcs.collect(incoming) } else if (nextRemoteCommit_opt.map(_.txid).contains(tx.txid)) { - // we must fail htlcs that have been removed from the next commitment - recentlyFailedHtlcs(remoteCommit, nextRemoteCommit_opt, d.commitments.changes) + // Their next commit got confirmed: any htlc that is *not* in their next commit will never reach the chain. + outgoingHtlcs -- nextRemoteCommit_opt.map(_.spec.htlcs).getOrElse(Set.empty).collect(incoming) } else { Set.empty } } - /** - * Returns HTLCs that have been failed and removed from the next remote commitment. - * We need to propagate their failure upstream if we don't receive the remote signature to remove them from our local commitment. 
- */ - def recentlyFailedHtlcs(remoteCommit: RemoteCommit, nextRemoteCommit_opt: Option[RemoteCommit], changes: CommitmentChanges): Set[UpdateAddHtlc] = { - // Incoming htlcs that have been removed from their commitment are either fulfilled or failed: - // - if they were fulfilled, we already relayed the preimage upstream - // - if they were failed, we need to relay the failure upstream since those htlcs will never reach the chain - val settledHtlcs = remoteCommit.spec.htlcs.collect(incoming) -- nextRemoteCommit_opt.map(_.spec.htlcs.collect(incoming)).getOrElse(Set.empty) - val failedHtlcs = changes.remoteChanges.all.collect { - case f: UpdateFailHtlc => f.id - case f: UpdateFailMalformedHtlc => f.id - }.toSet - settledHtlcs.filter(htlc => failedHtlcs.contains(htlc.id)) - } - /** * In CLOSING state, when we are notified that a transaction has been confirmed, we check if this tx belongs in the * local commit scenario and keep track of it. diff --git a/eclair-core/src/main/scala/fr/acinq/eclair/channel/fsm/Channel.scala b/eclair-core/src/main/scala/fr/acinq/eclair/channel/fsm/Channel.scala index e3ccf77370..8b96e89029 100644 --- a/eclair-core/src/main/scala/fr/acinq/eclair/channel/fsm/Channel.scala +++ b/eclair-core/src/main/scala/fr/acinq/eclair/channel/fsm/Channel.scala @@ -2001,15 +2001,21 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with handleRemoteSpentNext(tx, d1) } else { // Our counterparty is trying to broadcast a revoked commit tx (cheating attempt). - // We need to fail pending outgoing HTLCs: we must do it here because we're overwriting the commitments data, so we won't be able to do it afterwards. 
- val remoteCommit = d.commitments.latest.remoteCommit - val nextRemoteCommit_opt = d.commitments.latest.nextRemoteCommit_opt.map(_.commit) - val pendingOutgoingHtlcs = nextRemoteCommit_opt.getOrElse(remoteCommit).spec.htlcs.collect(DirectedHtlc.incoming) - val failedHtlcs = Closing.recentlyFailedHtlcs(remoteCommit, nextRemoteCommit_opt, d.commitments.changes) - (pendingOutgoingHtlcs ++ failedHtlcs).foreach { add => + // We need to fail pending outgoing HTLCs, otherwise they will time out upstream. + // We must do it here: since we're overwriting the commitments data, we will lose all information + // about HTLCs that are in the current commitments but were not in the revoked one. + // We fail *all* outgoing HTLCs: + // - those that are not in the revoked commitment will never settle on-chain + // - those that are in the revoked commitment will be claimed on-chain, so it's as if they were failed + // Note that if we already received the preimage for some of these HTLCs, we already relayed it upstream, + // so the fail command will be a no-op. + val outgoingHtlcs = d.commitments.latest.localCommit.spec.htlcs.collect(DirectedHtlc.outgoing) ++ + d.commitments.latest.remoteCommit.spec.htlcs.collect(DirectedHtlc.incoming) ++ + d.commitments.latest.nextRemoteCommit_opt.map(_.commit.spec.htlcs.collect(DirectedHtlc.incoming)).getOrElse(Set.empty) + outgoingHtlcs.foreach { add => d.commitments.originChannels.get(add.id) match { case Some(origin) => - log.info(s"failing htlc #${add.id} paymentHash=${add.paymentHash} origin=$origin: overridden by revoked remote commit") + log.info("failing htlc #{} paymentHash={} origin={}: overridden by revoked remote commit", add.id, add.paymentHash, origin) relayer ! 
RES_ADD_SETTLED(origin, add, HtlcResult.OnChainFail(HtlcOverriddenByLocalCommit(d.channelId, add))) case None => () } @@ -2017,7 +2023,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with handleRemoteSpentOther(tx, d1) } case None => - log.warning(s"ignoring unrecognized alternative commit tx=${tx.txid}") + log.warning("ignoring unrecognized alternative commit tx={}", tx.txid) stay() } @@ -2028,20 +2034,22 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with // when a remote or local commitment tx containing outgoing htlcs is published on the network, // we watch it in order to extract payment preimage if funds are pulled by the counterparty // we can then use these preimages to fulfill origin htlcs - log.debug(s"processing bitcoin output spent by txid=${tx.txid} tx=$tx") + log.debug(s"processing bitcoin output spent by txid={} tx={}", tx.txid, tx) val extracted = Closing.extractPreimages(d.commitments.latest, tx) extracted.foreach { case (htlc, preimage) => d.commitments.originChannels.get(htlc.id) match { case Some(origin) => - log.info(s"fulfilling htlc #${htlc.id} paymentHash=${htlc.paymentHash} origin=$origin") + log.info("fulfilling htlc #{} paymentHash={} origin={}", htlc.id, htlc.paymentHash, origin) relayer ! RES_ADD_SETTLED(origin, htlc, HtlcResult.OnChainFulfill(preimage)) case None => // if we don't have the origin, it means that we already have forwarded the fulfill so that's not a big deal. 
// this can happen if they send a signature containing the fulfill, then fail the channel before we have time to sign it - log.info(s"cannot fulfill htlc #${htlc.id} paymentHash=${htlc.paymentHash} (origin not found)") + log.warning("cannot fulfill htlc #{} paymentHash={} (origin not found)", htlc.id, htlc.paymentHash) } } val revokedCommitPublished1 = d.revokedCommitPublished.map { rev => + // this transaction may be an HTLC transaction spending a revoked commitment + // in that case, we immediately publish an HTLC-penalty transaction spending its output(s) val (rev1, penaltyTxs) = Closing.RevokedClose.claimHtlcTxOutputs(keyManager, d.commitments.params, d.commitments.remotePerCommitmentSecrets, rev, tx, nodeParams.currentBitcoinCoreFeerates, d.finalScriptPubKey) penaltyTxs.foreach(claimTx => txPublisher ! PublishFinalTx(claimTx, claimTx.fee, None)) penaltyTxs.foreach(claimTx => blockchain ! WatchOutputSpent(self, tx.txid, claimTx.input.outPoint.index.toInt, claimTx.amountIn, hints = Set(claimTx.tx.txid))) @@ -2050,7 +2058,7 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with stay() using d.copy(revokedCommitPublished = revokedCommitPublished1) storing() case Event(WatchTxConfirmedTriggered(blockHeight, _, tx), d: DATA_CLOSING) => - log.info(s"txid=${tx.txid} has reached mindepth, updating closing state") + log.info("txid={} has reached mindepth, updating closing state", tx.txid) context.system.eventStream.publish(TransactionConfirmed(d.channelId, remoteNodeId, tx)) // first we check if this tx belongs to one of the current local/remote commits, update it and update the channel data val d1 = d.copy( @@ -2101,27 +2109,31 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with val timedOutHtlcs = Closing.isClosingTypeAlreadyKnown(d1) match { case Some(c: Closing.LocalClose) => Closing.trimmedOrTimedOutHtlcs(d.commitments.params.commitmentFormat, c.localCommit, c.localCommitPublished, 
d.commitments.params.localParams.dustLimit, tx) case Some(c: Closing.RemoteClose) => Closing.trimmedOrTimedOutHtlcs(d.commitments.params.commitmentFormat, c.remoteCommit, c.remoteCommitPublished, d.commitments.params.remoteParams.dustLimit, tx) - case _ => Set.empty[UpdateAddHtlc] // we lose htlc outputs in dataloss protection scenarios (future remote commit) + case Some(_: Closing.RevokedClose) => Set.empty[UpdateAddHtlc] // revoked commitments are handled using [[overriddenOutgoingHtlcs]] below + case Some(_: Closing.RecoveryClose) => Set.empty[UpdateAddHtlc] // we lose htlc outputs in dataloss protection scenarios (future remote commit) + case Some(_: Closing.MutualClose) => Set.empty[UpdateAddHtlc] + case None => Set.empty[UpdateAddHtlc] } timedOutHtlcs.foreach { add => d.commitments.originChannels.get(add.id) match { case Some(origin) => - log.info(s"failing htlc #${add.id} paymentHash=${add.paymentHash} origin=$origin: htlc timed out") + log.info("failing htlc #{} paymentHash={} origin={}: htlc timed out", add.id, add.paymentHash, origin) relayer ! 
RES_ADD_SETTLED(origin, add, HtlcResult.OnChainFail(HtlcsTimedoutDownstream(d.channelId, Set(add)))) case None => // same as for fulfilling the htlc (no big deal) - log.info(s"cannot fail timed out htlc #${add.id} paymentHash=${add.paymentHash} (origin not found)") + log.info("cannot fail timed out htlc #{} paymentHash={} (origin not found)", add.id, add.paymentHash) } } // we also need to fail outgoing htlcs that we know will never reach the blockchain + // if we previously received the preimage, we have already relayed it upstream and the command below will be ignored Closing.overriddenOutgoingHtlcs(d, tx).foreach { add => d.commitments.originChannels.get(add.id) match { case Some(origin) => - log.info(s"failing htlc #${add.id} paymentHash=${add.paymentHash} origin=$origin: overridden by local commit") + log.info("failing htlc #{} paymentHash={} origin={}: overridden by local commit", add.id, add.paymentHash, origin) relayer ! RES_ADD_SETTLED(origin, add, HtlcResult.OnChainFail(HtlcOverriddenByLocalCommit(d.channelId, add))) case None => // same as for fulfilling the htlc (no big deal) - log.info(s"cannot fail overridden htlc #${add.id} paymentHash=${add.paymentHash} (origin not found)") + log.info("cannot fail overridden htlc #{} paymentHash={} (origin not found)", add.id, add.paymentHash) } } // for our outgoing payments, let's send events if we know that they will settle on chain @@ -2295,8 +2307,8 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with case _ => Set.empty } val lastFundingLockedTlvs: Set[ChannelReestablishTlv] = if (d.commitments.params.remoteParams.initFeatures.hasFeature(Features.SplicePrototype)) { - d.commitments.lastLocalLocked_opt.map(c => ChannelReestablishTlv.MyCurrentFundingLockedTlv(c.fundingTxId)).toSet ++ - d.commitments.lastRemoteLocked_opt.map(c => ChannelReestablishTlv.YourLastFundingLockedTlv(c.fundingTxId)).toSet + d.commitments.lastLocalLocked_opt.map(c => 
ChannelReestablishTlv.MyCurrentFundingLockedTlv(c.fundingTxId)).toSet ++ + d.commitments.lastRemoteLocked_opt.map(c => ChannelReestablishTlv.YourLastFundingLockedTlv(c.fundingTxId)).toSet } else Set.empty val channelReestablish = ChannelReestablish( @@ -2996,11 +3008,12 @@ class Channel(val nodeParams: NodeParams, val wallet: OnChainChannelFunder with /** Fail outgoing unsigned htlcs right away when transitioning from NORMAL to CLOSING */ onTransition { case NORMAL -> CLOSING => - (nextStateData: @unchecked) match { + nextStateData match { case d: DATA_CLOSING => d.commitments.changes.localChanges.proposed.collect { case add: UpdateAddHtlc => relayer ! RES_ADD_SETTLED(d.commitments.originChannels(add.id), add, HtlcResult.ChannelFailureBeforeSigned) } + case _ => () } } diff --git a/eclair-core/src/main/scala/fr/acinq/eclair/db/PendingCommandsDb.scala b/eclair-core/src/main/scala/fr/acinq/eclair/db/PendingCommandsDb.scala index 240d209095..7c3ed2cd6d 100644 --- a/eclair-core/src/main/scala/fr/acinq/eclair/db/PendingCommandsDb.scala +++ b/eclair-core/src/main/scala/fr/acinq/eclair/db/PendingCommandsDb.scala @@ -23,35 +23,30 @@ import fr.acinq.eclair.channel._ import fr.acinq.eclair.wire.protocol.{UpdateFailHtlc, UpdateFailMalformedHtlc, UpdateFulfillHtlc, UpdateMessage} /** - * This database stores CMD_FULFILL_HTLC and CMD_FAIL_HTLC that we have received from downstream - * (either directly via UpdateFulfillHtlc or by extracting the value from the - * blockchain). + * This database stores [[CMD_FULFILL_HTLC]], [[CMD_FAIL_HTLC]] and [[CMD_FAIL_MALFORMED_HTLC]] commands received from + * downstream (either directly via channel settlement like [[UpdateFulfillHtlc]] or [[UpdateFailHtlc]] or by extracting + * the preimage from the blockchain during a force-close). * - * This means that this database is only used in the context of *relaying* payments. 
+ * We must ensure that if a downstream channel is able to pull funds from us, we can always do the same from upstream, + * otherwise we lose money. Hence the need for persistence to handle all corner cases where we disconnect or restart + * before settling on the upstream channel. * - * We need to be sure that if downstream is able to pull funds from us, we can always - * do the same from upstream, otherwise we lose money. Hence the need for persistence - * to handle all corner cases. + * Importantly, we must only store the *first* command received for a given upstream HTLC: if we first receive + * [[CMD_FULFILL_HTLC]] and then [[CMD_FAIL_HTLC]], the second command must be ignored. This should be implemented by + * using a primary key based on the (channel_id, htlc_id) pair and ignoring conflicting inserts. * + * Note: this database is only used in the context of *relaying* payments. */ trait PendingCommandsDb { - + // @formatter:off def addSettlementCommand(channelId: ByteVector32, cmd: HtlcSettlementCommand): Unit - def removeSettlementCommand(channelId: ByteVector32, htlcId: Long): Unit - def listSettlementCommands(channelId: ByteVector32): Seq[HtlcSettlementCommand] - def listSettlementCommands(): Seq[(ByteVector32, HtlcSettlementCommand)] - + // @formatter:on } object PendingCommandsDb { - /** - * We store [[CMD_FULFILL_HTLC]]/[[CMD_FAIL_HTLC]]/[[CMD_FAIL_MALFORMED_HTLC]] - * in a database because we don't want to lose preimages, or to forget to fail - * incoming htlcs, which would lead to unwanted channel closings. - */ def safeSend(register: ActorRef, db: PendingCommandsDb, channelId: ByteVector32, cmd: HtlcSettlementCommand): Unit = { // htlc settlement commands don't have replyTo register ! 
Register.Forward(null, channelId, cmd) @@ -65,17 +60,17 @@ object PendingCommandsDb { def ackSettlementCommands(db: PendingCommandsDb, updates: List[UpdateMessage])(implicit log: LoggingAdapter): Unit = updates.collect { case u: UpdateFulfillHtlc => - log.debug(s"fulfill acked for htlcId=${u.id}") + log.debug("fulfill acked for htlcId={}", u.id) db.removeSettlementCommand(u.channelId, u.id) case u: UpdateFailHtlc => - log.debug(s"fail acked for htlcId=${u.id}") + log.debug("fail acked for htlcId={}", u.id) db.removeSettlementCommand(u.channelId, u.id) case u: UpdateFailMalformedHtlc => - log.debug(s"fail-malformed acked for htlcId=${u.id}") + log.debug("fail-malformed acked for htlcId={}", u.id) db.removeSettlementCommand(u.channelId, u.id) } - def getSettlementCommands(db: PendingCommandsDb, channelId: ByteVector32)(implicit log: LoggingAdapter): Seq[HtlcSettlementCommand] = { + def getSettlementCommands(db: PendingCommandsDb, channelId: ByteVector32): Seq[HtlcSettlementCommand] = { db.listSettlementCommands(channelId) } } \ No newline at end of file diff --git a/eclair-core/src/main/scala/fr/acinq/eclair/payment/relay/PostRestartHtlcCleaner.scala b/eclair-core/src/main/scala/fr/acinq/eclair/payment/relay/PostRestartHtlcCleaner.scala index 48b73bc029..b8f9c5810f 100644 --- a/eclair-core/src/main/scala/fr/acinq/eclair/payment/relay/PostRestartHtlcCleaner.scala +++ b/eclair-core/src/main/scala/fr/acinq/eclair/payment/relay/PostRestartHtlcCleaner.scala @@ -28,7 +28,7 @@ import fr.acinq.eclair.db._ import fr.acinq.eclair.payment.Monitoring.Tags import fr.acinq.eclair.payment.{ChannelPaymentRelayed, IncomingPaymentPacket, PaymentFailed, PaymentSent} import fr.acinq.eclair.transactions.DirectedHtlc.outgoing -import fr.acinq.eclair.wire.protocol.{FailureMessage, FailureReason, InvalidOnionBlinding, TemporaryNodeFailure, UpdateAddHtlc} +import fr.acinq.eclair.wire.protocol._ import fr.acinq.eclair.{CustomCommitmentsPlugin, Feature, Features, Logs, MilliSatoshiLong, 
NodeParams, TimestampMilli} import scala.concurrent.Promise @@ -407,7 +407,7 @@ object PostRestartHtlcCleaner { val htlcsOut = channels .collect { case c: ChannelDataWithCommitments => c } .flatMap { c => - // Filter out HTLCs that will never reach the blockchain or have already been timed-out on-chain. + // Filter out HTLCs that will never reach the blockchain or have already been settled on-chain. val htlcsToIgnore: Set[Long] = c match { case d: DATA_CLOSING => val closingType_opt = Closing.isClosingTypeAlreadyKnown(d) @@ -415,23 +415,27 @@ object PostRestartHtlcCleaner { case Some(c: Closing.LocalClose) => Closing.overriddenOutgoingHtlcs(d, c.localCommitPublished.commitTx) case Some(c: Closing.RemoteClose) => Closing.overriddenOutgoingHtlcs(d, c.remoteCommitPublished.commitTx) case Some(c: Closing.RevokedClose) => Closing.overriddenOutgoingHtlcs(d, c.revokedCommitPublished.commitTx) - case _ => Set.empty[UpdateAddHtlc] + case Some(c: Closing.RecoveryClose) => Closing.overriddenOutgoingHtlcs(d, c.remoteCommitPublished.commitTx) + case Some(_: Closing.MutualClose) => Set.empty[UpdateAddHtlc] + case None => Set.empty[UpdateAddHtlc] }).map(_.id) - val irrevocablySpent = closingType_opt match { - case Some(c: Closing.LocalClose) => c.localCommitPublished.irrevocablySpent.values.toSeq - case Some(c: Closing.RemoteClose) => c.remoteCommitPublished.irrevocablySpent.values.toSeq - case Some(c: Closing.RevokedClose) => c.revokedCommitPublished.irrevocablySpent.values.toSeq - case _ => Nil + val confirmedTxs = closingType_opt match { + case Some(c: Closing.LocalClose) => c.localCommitPublished.irrevocablySpent.values.toSet + case Some(c: Closing.RemoteClose) => c.remoteCommitPublished.irrevocablySpent.values.toSet + case Some(c: Closing.RevokedClose) => c.revokedCommitPublished.irrevocablySpent.values.toSet + case Some(c: Closing.RecoveryClose) => c.remoteCommitPublished.irrevocablySpent.values.toSet + case Some(_: Closing.MutualClose) => Set.empty + case None => Set.empty 
} + val params = d.commitments.params val timedOutHtlcs: Set[Long] = (closingType_opt match { - case Some(c: Closing.LocalClose) => - val confirmedTxs = c.localCommitPublished.commitTx +: irrevocablySpent.filter(tx => Closing.isHtlcTimeout(tx, c.localCommitPublished)) - confirmedTxs.flatMap(tx => Closing.trimmedOrTimedOutHtlcs(d.commitments.params.commitmentFormat, c.localCommit, c.localCommitPublished, d.commitments.params.localParams.dustLimit, tx)) - case Some(c: Closing.RemoteClose) => - val confirmedTxs = c.remoteCommitPublished.commitTx +: irrevocablySpent.filter(tx => Closing.isClaimHtlcTimeout(tx, c.remoteCommitPublished)) - confirmedTxs.flatMap(tx => Closing.trimmedOrTimedOutHtlcs(d.commitments.params.commitmentFormat, c.remoteCommit, c.remoteCommitPublished, d.commitments.params.remoteParams.dustLimit, tx)) - case _ => Seq.empty[UpdateAddHtlc] - }).map(_.id).toSet + case Some(c: Closing.LocalClose) => confirmedTxs.flatMap(tx => Closing.trimmedOrTimedOutHtlcs(params.commitmentFormat, c.localCommit, c.localCommitPublished, params.localParams.dustLimit, tx)) + case Some(c: Closing.RemoteClose) => confirmedTxs.flatMap(tx => Closing.trimmedOrTimedOutHtlcs(params.commitmentFormat, c.remoteCommit, c.remoteCommitPublished, params.remoteParams.dustLimit, tx)) + case Some(_: Closing.RevokedClose) => Set.empty // revoked commitments are handled using [[overriddenOutgoingHtlcs]] above + case Some(_: Closing.RecoveryClose) => Set.empty // we lose htlc outputs in dataloss protection scenarios (future remote commit) + case Some(_: Closing.MutualClose) => Set.empty + case None => Set.empty + }).map(_.id) overriddenHtlcs ++ timedOutHtlcs case _ => Set.empty } diff --git a/eclair-core/src/main/scala/fr/acinq/eclair/transactions/Scripts.scala b/eclair-core/src/main/scala/fr/acinq/eclair/transactions/Scripts.scala index 34aaf3a5a7..6137d9e7f0 100644 --- a/eclair-core/src/main/scala/fr/acinq/eclair/transactions/Scripts.scala +++ 
b/eclair-core/src/main/scala/fr/acinq/eclair/transactions/Scripts.scala @@ -226,6 +226,9 @@ object Scripts { case ScriptWitness(Seq(ByteVector.empty, _, _, paymentPreimage, _)) if paymentPreimage.size == 32 => ByteVector32(paymentPreimage) } + /** Extract payment preimages from a (potentially batched) 2nd-stage HTLC transaction's witnesses. */ + def extractPreimagesFromHtlcSuccess(tx: Transaction): Set[ByteVector32] = tx.txIn.map(_.witness).collect(extractPreimageFromHtlcSuccess).toSet + /** * If remote publishes its commit tx where there was a remote->local htlc, then local uses this script to * claim its funds using a payment preimage (consumes htlcOffered script from commit tx) @@ -238,6 +241,9 @@ object Scripts { case ScriptWitness(Seq(_, paymentPreimage, _)) if paymentPreimage.size == 32 => ByteVector32(paymentPreimage) } + /** Extract payment preimages from a (potentially batched) claim HTLC transaction's witnesses. */ + def extractPreimagesFromClaimHtlcSuccess(tx: Transaction): Set[ByteVector32] = tx.txIn.map(_.witness).collect(extractPreimageFromClaimHtlcSuccess).toSet + def htlcReceived(localHtlcPubkey: PublicKey, remoteHtlcPubkey: PublicKey, revocationPubKey: PublicKey, paymentHash: ByteVector, lockTime: CltvExpiry, commitmentFormat: CommitmentFormat): Seq[ScriptElt] = { val addCsvDelay = commitmentFormat match { case DefaultCommitmentFormat => false @@ -274,11 +280,6 @@ object Scripts { def witnessHtlcTimeout(localSig: ByteVector64, remoteSig: ByteVector64, htlcOfferedScript: ByteVector, commitmentFormat: CommitmentFormat) = ScriptWitness(ByteVector.empty :: der(remoteSig, htlcRemoteSighash(commitmentFormat)) :: der(localSig) :: ByteVector.empty :: htlcOfferedScript :: Nil) - /** Extract the payment hash from a 2nd-stage HTLC Timeout transaction's witness script */ - def extractPaymentHashFromHtlcTimeout: PartialFunction[ScriptWitness, ByteVector] = { - case ScriptWitness(Seq(ByteVector.empty, _, _, ByteVector.empty, htlcOfferedScript)) => 
htlcOfferedScript.slice(109, 109 + 20) - } - /** * If remote publishes its commit tx where there was a local->remote htlc, then local uses this script to * claim its funds after timeout (consumes htlcReceived script from commit tx) @@ -286,11 +287,6 @@ object Scripts { def witnessClaimHtlcTimeoutFromCommitTx(localSig: ByteVector64, htlcReceivedScript: ByteVector) = ScriptWitness(der(localSig) :: ByteVector.empty :: htlcReceivedScript :: Nil) - /** Extract the payment hash from a timed-out received htlc. */ - def extractPaymentHashFromClaimHtlcTimeout: PartialFunction[ScriptWitness, ByteVector] = { - case ScriptWitness(Seq(_, ByteVector.empty, htlcReceivedScript)) => htlcReceivedScript.slice(69, 69 + 20) - } - /** * This witness script spends (steals) a [[htlcOffered]] or [[htlcReceived]] output using a revocation key as a punishment * for having published a revoked transaction @@ -350,7 +346,7 @@ object Scripts { * miniscript: this is not miniscript compatible * * @param localDelayedPaymentPubkey local delayed key - * @param revocationPubkey revocation key + * @param revocationPubkey revocation key * @return a script that will be used to add a "revocation" leaf to a script tree */ private def toRevocationKey(localDelayedPaymentPubkey: PublicKey, revocationPubkey: PublicKey): Seq[ScriptElt] = { diff --git a/eclair-core/src/test/scala/fr/acinq/eclair/channel/HelpersSpec.scala b/eclair-core/src/test/scala/fr/acinq/eclair/channel/HelpersSpec.scala index d8070b7264..e511ba7030 100644 --- a/eclair-core/src/test/scala/fr/acinq/eclair/channel/HelpersSpec.scala +++ b/eclair-core/src/test/scala/fr/acinq/eclair/channel/HelpersSpec.scala @@ -103,47 +103,6 @@ class HelpersSpec extends TestKitBaseClass with AnyFunSuiteLike with ChannelStat Fixture(alice, lcp, Set(htlca1a, htlca1b, htlca2), bob, rcp, Set(htlcb1a, htlcb1b, htlcb2), probe) } - def identifyHtlcs(f: Fixture): Unit = { - import f._ - - val htlcTimeoutTxs = getHtlcTimeoutTxs(aliceCommitPublished) - val htlcSuccessTxs = 
getHtlcSuccessTxs(aliceCommitPublished) - val claimHtlcTimeoutTxs = getClaimHtlcTimeoutTxs(bobCommitPublished) - val claimHtlcSuccessTxs = getClaimHtlcSuccessTxs(bobCommitPublished) - - // Valid txs should be detected: - htlcTimeoutTxs.foreach(tx => assert(Closing.isHtlcTimeout(tx.tx, aliceCommitPublished))) - htlcSuccessTxs.foreach(tx => assert(Closing.isHtlcSuccess(tx.tx, aliceCommitPublished))) - claimHtlcTimeoutTxs.foreach(tx => assert(Closing.isClaimHtlcTimeout(tx.tx, bobCommitPublished))) - claimHtlcSuccessTxs.foreach(tx => assert(Closing.isClaimHtlcSuccess(tx.tx, bobCommitPublished))) - - // Invalid txs should be rejected: - htlcSuccessTxs.foreach(tx => assert(!Closing.isHtlcTimeout(tx.tx, aliceCommitPublished))) - claimHtlcTimeoutTxs.foreach(tx => assert(!Closing.isHtlcTimeout(tx.tx, aliceCommitPublished))) - claimHtlcSuccessTxs.foreach(tx => assert(!Closing.isHtlcTimeout(tx.tx, aliceCommitPublished))) - htlcTimeoutTxs.foreach(tx => assert(!Closing.isHtlcSuccess(tx.tx, aliceCommitPublished))) - claimHtlcTimeoutTxs.foreach(tx => assert(!Closing.isHtlcSuccess(tx.tx, aliceCommitPublished))) - claimHtlcSuccessTxs.foreach(tx => assert(!Closing.isHtlcSuccess(tx.tx, aliceCommitPublished))) - htlcTimeoutTxs.foreach(tx => assert(!Closing.isClaimHtlcTimeout(tx.tx, bobCommitPublished))) - htlcSuccessTxs.foreach(tx => assert(!Closing.isClaimHtlcTimeout(tx.tx, bobCommitPublished))) - claimHtlcSuccessTxs.foreach(tx => assert(!Closing.isClaimHtlcTimeout(tx.tx, bobCommitPublished))) - htlcTimeoutTxs.foreach(tx => assert(!Closing.isClaimHtlcSuccess(tx.tx, bobCommitPublished))) - htlcSuccessTxs.foreach(tx => assert(!Closing.isClaimHtlcSuccess(tx.tx, bobCommitPublished))) - claimHtlcTimeoutTxs.foreach(tx => assert(!Closing.isClaimHtlcSuccess(tx.tx, bobCommitPublished))) - } - - test("identify htlc txs") { - identifyHtlcs(setupHtlcs()) - } - - test("identify htlc txs (anchor outputs)", Tag(ChannelStateTestsTags.AnchorOutputs)) { - 
identifyHtlcs(setupHtlcs(Set(ChannelStateTestsTags.AnchorOutputs))) - } - - test("identify htlc txs (anchor outputs zero fee htlc txs)", Tag(ChannelStateTestsTags.AnchorOutputsZeroFeeHtlcTxs)) { - identifyHtlcs(setupHtlcs(Set(ChannelStateTestsTags.AnchorOutputsZeroFeeHtlcTxs))) - } - def findTimedOutHtlcs(f: Fixture): Unit = { import f._ diff --git a/eclair-core/src/test/scala/fr/acinq/eclair/channel/states/ChannelStateTestsHelperMethods.scala b/eclair-core/src/test/scala/fr/acinq/eclair/channel/states/ChannelStateTestsHelperMethods.scala index ec83a2c5f6..e8266fe095 100644 --- a/eclair-core/src/test/scala/fr/acinq/eclair/channel/states/ChannelStateTestsHelperMethods.scala +++ b/eclair-core/src/test/scala/fr/acinq/eclair/channel/states/ChannelStateTestsHelperMethods.scala @@ -183,7 +183,7 @@ trait ChannelStateTestsBase extends Assertions with Eventually { } def updateInitFeatures(nodeParamsA: NodeParams, nodeParamsB: NodeParams, tags: Set[String]): (NodeParams, NodeParams) = { - (nodeParamsA.copy(features = nodeParamsA.features + val nodeParamsA1 = nodeParamsA.copy(features = nodeParamsA.features .modify(_.activated).usingIf(tags.contains(ChannelStateTestsTags.DisableWumbo))(_.removed(Features.Wumbo)) .modify(_.activated).usingIf(tags.contains(ChannelStateTestsTags.StaticRemoteKey))(_.updated(Features.StaticRemoteKey, FeatureSupport.Optional)) .modify(_.activated).usingIf(tags.contains(ChannelStateTestsTags.AnchorOutputs))(_.updated(Features.StaticRemoteKey, FeatureSupport.Optional).updated(Features.AnchorOutputs, FeatureSupport.Optional)) @@ -194,8 +194,9 @@ trait ChannelStateTestsBase extends Assertions with Eventually { .modify(_.activated).usingIf(tags.contains(ChannelStateTestsTags.ZeroConf))(_.updated(Features.ZeroConf, FeatureSupport.Optional)) .modify(_.activated).usingIf(tags.contains(ChannelStateTestsTags.ScidAlias))(_.updated(Features.ScidAlias, FeatureSupport.Optional)) 
.modify(_.activated).usingIf(tags.contains(ChannelStateTestsTags.DualFunding))(_.updated(Features.DualFunding, FeatureSupport.Optional)) - .modify(_.activated).usingIf(tags.contains(ChannelStateTestsTags.SimpleClose))(_.updated(Features.SimpleClose, FeatureSupport.Optional))), - nodeParamsB.copy(features = nodeParamsB.features + .modify(_.activated).usingIf(tags.contains(ChannelStateTestsTags.SimpleClose))(_.updated(Features.SimpleClose, FeatureSupport.Optional)) + ) + val nodeParamsB1 = nodeParamsB.copy(features = nodeParamsB.features .modify(_.activated).usingIf(tags.contains(ChannelStateTestsTags.DisableWumbo))(_.removed(Features.Wumbo)) .modify(_.activated).usingIf(tags.contains(ChannelStateTestsTags.StaticRemoteKey))(_.updated(Features.StaticRemoteKey, FeatureSupport.Optional)) .modify(_.activated).usingIf(tags.contains(ChannelStateTestsTags.AnchorOutputs))(_.updated(Features.StaticRemoteKey, FeatureSupport.Optional).updated(Features.AnchorOutputs, FeatureSupport.Optional)) @@ -208,7 +209,8 @@ trait ChannelStateTestsBase extends Assertions with Eventually { .modify(_.activated).usingIf(tags.contains(ChannelStateTestsTags.DualFunding))(_.updated(Features.DualFunding, FeatureSupport.Optional)) .modify(_.activated).usingIf(tags.contains(ChannelStateTestsTags.SimpleClose))(_.updated(Features.SimpleClose, FeatureSupport.Optional)) .modify(_.activated).usingIf(tags.contains(ChannelStateTestsTags.DisableSplice))(_.removed(Features.SplicePrototype)) - )) + ) + (nodeParamsA1, nodeParamsB1) } def computeFeatures(setup: SetupFixture, tags: Set[String], channelFlags: ChannelFlags): (LocalParams, LocalParams, SupportedChannelType) = { @@ -388,7 +390,9 @@ trait ChannelStateTestsBase extends Assertions with Eventually { fundingTx } - def localOrigin(replyTo: ActorRef): Origin.Hot = Origin.Hot(replyTo, Upstream.Local(UUID.randomUUID())) + def localOrigin(replyTo: ActorRef): Origin.Hot = Origin.Hot(replyTo, localUpstream()) + + def localUpstream(): Upstream.Local = 
Upstream.Local(UUID.randomUUID()) def makeCmdAdd(amount: MilliSatoshi, destination: PublicKey, currentBlockHeight: BlockHeight): (ByteVector32, CMD_ADD_HTLC) = { makeCmdAdd(amount, CltvExpiryDelta(144), destination, randomBytes32(), currentBlockHeight, Upstream.Local(UUID.randomUUID())) @@ -419,15 +423,19 @@ trait ChannelStateTestsBase extends Assertions with Eventually { addHtlc(amount, CltvExpiryDelta(144), s, r, s2r, r2s) } + def addHtlc(amount: MilliSatoshi, s: TestFSMRef[ChannelState, ChannelData, Channel], r: TestFSMRef[ChannelState, ChannelData, Channel], s2r: TestProbe, r2s: TestProbe, upstream: Upstream.Hot): (ByteVector32, UpdateAddHtlc) = { + addHtlc(amount, CltvExpiryDelta(144), s, r, s2r, r2s, ActorRef.noSender, upstream) + } + def addHtlc(amount: MilliSatoshi, s: TestFSMRef[ChannelState, ChannelData, Channel], r: TestFSMRef[ChannelState, ChannelData, Channel], s2r: TestProbe, r2s: TestProbe, replyTo: ActorRef): (ByteVector32, UpdateAddHtlc) = { addHtlc(amount, CltvExpiryDelta(144), s, r, s2r, r2s, replyTo) } def addHtlc(amount: MilliSatoshi, cltvExpiryDelta: CltvExpiryDelta, s: TestFSMRef[ChannelState, ChannelData, Channel], r: TestFSMRef[ChannelState, ChannelData, Channel], s2r: TestProbe, r2s: TestProbe, replyTo: ActorRef = TestProbe().ref, upstream: Upstream.Hot = Upstream.Local(UUID.randomUUID())): (ByteVector32, UpdateAddHtlc) = { val currentBlockHeight = s.underlyingActor.nodeParams.currentBlockHeight - val (payment_preimage, cmd) = makeCmdAdd(amount, cltvExpiryDelta, r.underlyingActor.nodeParams.nodeId, randomBytes32(), currentBlockHeight, upstream, replyTo) + val (paymentPreimage, cmd) = makeCmdAdd(amount, cltvExpiryDelta, r.underlyingActor.nodeParams.nodeId, randomBytes32(), currentBlockHeight, upstream, replyTo) val htlc = addHtlc(cmd, s, r, s2r, r2s) - (payment_preimage, htlc) + (paymentPreimage, htlc) } def addHtlc(cmdAdd: CMD_ADD_HTLC, s: TestFSMRef[ChannelState, ChannelData, Channel], r: TestFSMRef[ChannelState, ChannelData, Channel], 
s2r: TestProbe, r2s: TestProbe): UpdateAddHtlc = { @@ -611,7 +619,7 @@ trait ChannelStateTestsBase extends Assertions with Eventually { val spentWatches = watchedOutputIndexes.map(_ => s2blockchain.expectMsgType[WatchOutputSpent]) spentWatches.foreach(ws => assert(ws.txId == commitTx.txid)) assert(spentWatches.map(_.outputIndex) == watchedOutputIndexes) - s2blockchain.expectNoMessage(1 second) + s2blockchain.expectNoMessage(100 millis) // s is now in CLOSING state with txs pending for confirmation before going in CLOSED state closingState.localCommitPublished.get @@ -654,7 +662,7 @@ trait ChannelStateTestsBase extends Assertions with Eventually { val spentWatches = htlcOutputIndexes.map(_ => s2blockchain.expectMsgType[WatchOutputSpent]) spentWatches.foreach(ws => assert(ws.txId == rCommitTx.txid)) assert(spentWatches.map(_.outputIndex) == htlcOutputIndexes) - s2blockchain.expectNoMessage(1 second) + s2blockchain.expectNoMessage(100 millis) // s is now in CLOSING state with txs pending for confirmation before going in CLOSED state remoteCommitPublished diff --git a/eclair-core/src/test/scala/fr/acinq/eclair/channel/states/e/NormalSplicesStateSpec.scala b/eclair-core/src/test/scala/fr/acinq/eclair/channel/states/e/NormalSplicesStateSpec.scala index 1307ad1a61..5d54b0fbff 100644 --- a/eclair-core/src/test/scala/fr/acinq/eclair/channel/states/e/NormalSplicesStateSpec.scala +++ b/eclair-core/src/test/scala/fr/acinq/eclair/channel/states/e/NormalSplicesStateSpec.scala @@ -22,7 +22,7 @@ import akka.testkit.{TestFSMRef, TestProbe} import com.softwaremill.quicklens.ModifyPimp import fr.acinq.bitcoin.ScriptFlags import fr.acinq.bitcoin.scalacompat.NumericSatoshi.abs -import fr.acinq.bitcoin.scalacompat.{ByteVector32, Satoshi, SatoshiLong, Transaction, TxIn} +import fr.acinq.bitcoin.scalacompat.{ByteVector32, Satoshi, SatoshiLong, Transaction} import fr.acinq.eclair._ import fr.acinq.eclair.blockchain.SingleKeyOnChainWallet import 
fr.acinq.eclair.blockchain.bitcoind.ZmqWatcher._ @@ -2737,9 +2737,12 @@ class NormalSplicesStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLik } // Bob's htlc-timeout txs confirm. + bob ! WatchFundingSpentTriggered(commitTx2) + val bobHtlcsTxsOut = htlcs.bobToAlice.map(_ => assertPublished(bob2blockchain, "claim-htlc-timeout")) val remoteOutpoints = alice.stateData.asInstanceOf[DATA_CLOSING].localCommitPublished.map(rcp => rcp.htlcTxs.filter(_._2.isEmpty).keys).toSeq.flatten assert(remoteOutpoints.size == htlcs.bobToAlice.size) - remoteOutpoints.foreach { out => alice ! WatchTxConfirmedTriggered(BlockHeight(400000), 42, htlcsTxsOut.head.copy(txIn = Seq(TxIn(out, Nil, 0)))) } + assert(remoteOutpoints.toSet == bobHtlcsTxsOut.flatMap(_.txIn.map(_.outPoint)).toSet) + bobHtlcsTxsOut.foreach { tx => alice ! WatchTxConfirmedTriggered(BlockHeight(400000), 42, tx) } alice2blockchain.expectNoMessage(100 millis) checkPostSpliceState(f, spliceOutFee(f, capacity = 1_900_000.sat)) @@ -2845,13 +2848,17 @@ class NormalSplicesStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLik alice2bob.expectNoMessage(100 millis) bob2alice.expectNoMessage(100 millis) - // From Alice's point of view, We now have two unconfirmed splices, both active. - // Bob makes a payment, that applies to both commitments. - val (preimage, add) = addHtlc(10_000_000 msat, bob, alice, bob2alice, alice2bob) - crossSign(bob, alice, bob2alice, alice2bob) - alice2relayer.expectMsgType[Relayer.RelayForward] - fulfillHtlc(add.id, preimage, alice, bob, alice2bob, bob2alice) + // From Alice's point of view, we now have two unconfirmed splices, both active. + // They both send additional HTLCs, that apply to both commitments. 
+ val (_, htlcIn) = addHtlc(10_000_000 msat, bob, alice, bob2alice, alice2bob) + val (_, htlcOut1) = addHtlc(20_000_000 msat, alice, bob, alice2bob, bob2alice) crossSign(alice, bob, alice2bob, bob2alice) + assert(alice2relayer.expectMsgType[Relayer.RelayForward].add == htlcIn) + alice2relayer.expectNoMessage(100 millis) + // Alice adds another HTLC that isn't signed by Bob. + val (_, htlcOut2) = addHtlc(15_000_000 msat, alice, bob, alice2bob, bob2alice) + alice ! CMD_SIGN() + alice2bob.expectMsgType[CommitSig] // Bob ignores Alice's message // The first splice transaction confirms. alice ! WatchFundingConfirmedTriggered(BlockHeight(400000), 42, fundingTx1) @@ -2864,12 +2871,12 @@ class NormalSplicesStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLik val aliceCommitTx2 = assertPublished(alice2blockchain, "commit-tx") assertPublished(alice2blockchain, "local-anchor") val claimMainDelayed2 = assertPublished(alice2blockchain, "local-main-delayed") - htlcs.aliceToBob.map(_ => assertPublished(alice2blockchain, "htlc-timeout")) + (htlcs.aliceToBob.map(_._2) ++ Seq(htlcOut1)).map(_ => assertPublished(alice2blockchain, "htlc-timeout")) alice2blockchain.expectWatchTxConfirmed(aliceCommitTx2.txid) alice2blockchain.expectWatchTxConfirmed(claimMainDelayed2.txid) alice2blockchain.expectMsgType[WatchOutputSpent] - htlcs.aliceToBob.map(_ => alice2blockchain.expectMsgType[WatchOutputSpent]) - htlcs.bobToAlice.map(_ => alice2blockchain.expectMsgType[WatchOutputSpent]) + (htlcs.aliceToBob.map(_._2) ++ Seq(htlcOut1)).map(_ => alice2blockchain.expectMsgType[WatchOutputSpent]) + (htlcs.bobToAlice.map(_._2) ++ Seq(htlcIn)).map(_ => alice2blockchain.expectMsgType[WatchOutputSpent]) // Bob's revoked commit tx wins. alice ! 
WatchAlternativeCommitTxConfirmedTriggered(BlockHeight(400000), 42, bobRevokedCommitTx) @@ -2884,15 +2891,18 @@ class NormalSplicesStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLik aliceHtlcsPenalty.map(_ => alice2blockchain.expectMsgType[WatchOutputSpent]) alice2blockchain.expectNoMessage(100 millis) + // Alice sends a failure upstream for every outgoing HTLC, including the ones that don't appear in the revoked commitment. + val outgoingHtlcs = (htlcs.aliceToBob.map(_._2) ++ Set(htlcOut1, htlcOut2)).map(htlc => (htlc, alice.stateData.asInstanceOf[DATA_CLOSING].commitments.originChannels(htlc.id))) + val settledOutgoingHtlcs = outgoingHtlcs.map(_ => alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFail]]).map(s => (s.htlc, s.origin)) + assert(outgoingHtlcs.toSet == settledOutgoingHtlcs.toSet) + alice2relayer.expectNoMessage(100 millis) + // Alice's penalty txs confirm. alice ! WatchTxConfirmedTriggered(BlockHeight(400000), 42, bobRevokedCommitTx) alice ! WatchTxConfirmedTriggered(BlockHeight(400000), 42, aliceClaimMain) alice ! WatchTxConfirmedTriggered(BlockHeight(400000), 42, aliceMainPenalty) aliceHtlcsPenalty.foreach { tx => alice ! 
WatchTxConfirmedTriggered(BlockHeight(400000), 42, tx) } - val settledOutgoingHtlcs = htlcs.aliceToBob.map(_ => alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFail]].htlc).toSet - assert(settledOutgoingHtlcs == htlcs.aliceToBob.map(_._2).toSet) - checkPostSpliceState(f, spliceOutFee = 0.sat) awaitCond(alice.stateName == CLOSED) assert(Helpers.Closing.isClosed(alice.stateData.asInstanceOf[DATA_CLOSING], None).exists(_.isInstanceOf[RevokedClose])) } @@ -3050,19 +3060,16 @@ class NormalSplicesStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLik assert(bob.stateData.asInstanceOf[DATA_NORMAL].commitments.active.size == 1) assert(bob.stateData.asInstanceOf[DATA_NORMAL].commitments.inactive.size == 2) - // bob makes a payment that is only applied to splice 2 - val (preimage, add) = addHtlc(10_000_000 msat, bob, alice, bob2alice, alice2bob) - crossSign(bob, alice, bob2alice, alice2bob) - alice2relayer.expectMsgType[Relayer.RelayForward] - fulfillHtlc(add.id, preimage, alice, bob, alice2bob, bob2alice) - crossSign(alice, bob, alice2bob, bob2alice) - bob2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFail]] - - // alice adds an outgoing htlc that is only applied to splice 2 - val pendingOutgoingHtlc = addHtlc(15_000_000 msat, alice, bob, alice2bob, bob2alice) + // They both send additional HTLCs, that only apply to splice 2. + val (_, htlcIn) = addHtlc(10_000_000 msat, bob, alice, bob2alice, alice2bob) + val (_, htlcOut1) = addHtlc(20_000_000 msat, alice, bob, alice2bob, bob2alice) crossSign(alice, bob, alice2bob, bob2alice) - bob2relayer.expectMsgType[Relayer.RelayForward] - val htlcs1 = htlcs.copy(aliceToBob = htlcs.aliceToBob ++ Seq(pendingOutgoingHtlc)) + assert(alice2relayer.expectMsgType[Relayer.RelayForward].add == htlcIn) + alice2relayer.expectNoMessage(100 millis) + // Alice adds another HTLC that isn't signed by Bob. + val (_, htlcOut2) = addHtlc(15_000_000 msat, alice, bob, alice2bob, bob2alice) + alice ! 
CMD_SIGN() + alice2bob.expectMsgType[CommitSig] // Bob ignores Alice's message // funding tx1 confirms alice ! WatchFundingConfirmedTriggered(BlockHeight(400000), 42, fundingTx1) @@ -3078,13 +3085,12 @@ class NormalSplicesStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLik val aliceCommitTx2 = assertPublished(alice2blockchain, "commit-tx") assertPublished(alice2blockchain, "local-anchor") val claimMainDelayed2 = assertPublished(alice2blockchain, "local-main-delayed") - htlcs1.aliceToBob.map(_ => assertPublished(alice2blockchain, "htlc-timeout")) - + (htlcs.aliceToBob.map(_._2) ++ Seq(htlcOut1)).map(_ => assertPublished(alice2blockchain, "htlc-timeout")) alice2blockchain.expectWatchTxConfirmed(aliceCommitTx2.txid) alice2blockchain.expectWatchTxConfirmed(claimMainDelayed2.txid) alice2blockchain.expectMsgType[WatchOutputSpent] // local-anchor - htlcs1.aliceToBob.foreach(_ => assert(alice2blockchain.expectMsgType[WatchOutputSpent].txId == aliceCommitTx2.txid)) - htlcs1.bobToAlice.foreach(_ => assert(alice2blockchain.expectMsgType[WatchOutputSpent].txId == aliceCommitTx2.txid)) + (htlcs.aliceToBob.map(_._2) ++ Seq(htlcOut1)).foreach(_ => assert(alice2blockchain.expectMsgType[WatchOutputSpent].txId == aliceCommitTx2.txid)) + (htlcs.bobToAlice.map(_._2) ++ Seq(htlcIn)).foreach(_ => assert(alice2blockchain.expectMsgType[WatchOutputSpent].txId == aliceCommitTx2.txid)) alice2blockchain.expectNoMessage(100 millis) // bob's revoked tx wins @@ -3100,6 +3106,12 @@ class NormalSplicesStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLik awaitCond(wallet.asInstanceOf[SingleKeyOnChainWallet].abandoned.contains(fundingTx2.txid)) alice2blockchain.expectNoMessage(100 millis) + // Alice sends a failure upstream for every outgoing HTLC, including the ones that don't appear in the revoked commitment. 
+ val outgoingHtlcs = (htlcs.aliceToBob.map(_._2) ++ Set(htlcOut1, htlcOut2)).map(htlc => (htlc, alice.stateData.asInstanceOf[DATA_CLOSING].commitments.originChannels(htlc.id))) + val settledOutgoingHtlcs = outgoingHtlcs.map(_ => alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFail]]).map(s => (s.htlc, s.origin)) + assert(outgoingHtlcs.toSet == settledOutgoingHtlcs.toSet) + alice2relayer.expectNoMessage(100 millis) + // all penalty txs confirm alice ! WatchTxConfirmedTriggered(BlockHeight(400000), 42, bobRevokedCommitTx) alice ! WatchTxConfirmedTriggered(BlockHeight(400000), 42, aliceClaimMain) @@ -3107,13 +3119,7 @@ class NormalSplicesStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLik alice2blockchain.expectWatchTxConfirmed(aliceMainPenalty.txid) alice ! WatchTxConfirmedTriggered(BlockHeight(400000), 42, aliceMainPenalty) aliceHtlcsPenalty.foreach { tx => alice ! WatchTxConfirmedTriggered(BlockHeight(400000), 42, tx) } - val settledOutgoingHtlcs = htlcs1.aliceToBob.map(_ => alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFail]].htlc).toSet - assert(settledOutgoingHtlcs == htlcs1.aliceToBob.map(_._2).toSet) - // alice's final commitment includes the initial htlcs, but not bob's payment - checkPostSpliceState(f, spliceOutFee = 0 sat) - - // done awaitCond(alice.stateName == CLOSED) assert(Helpers.Closing.isClosed(alice.stateData.asInstanceOf[DATA_CLOSING], None).exists(_.isInstanceOf[RevokedClose])) } diff --git a/eclair-core/src/test/scala/fr/acinq/eclair/channel/states/h/ClosingStateSpec.scala b/eclair-core/src/test/scala/fr/acinq/eclair/channel/states/h/ClosingStateSpec.scala index 52f7f8a04d..8cb3b22709 100644 --- a/eclair-core/src/test/scala/fr/acinq/eclair/channel/states/h/ClosingStateSpec.scala +++ b/eclair-core/src/test/scala/fr/acinq/eclair/channel/states/h/ClosingStateSpec.scala @@ -33,8 +33,8 @@ import fr.acinq.eclair.channel.states.ChannelStateTestsBase.PimpTestFSM import 
fr.acinq.eclair.channel.states.{ChannelStateTestsBase, ChannelStateTestsTags} import fr.acinq.eclair.payment._ import fr.acinq.eclair.payment.relay.Relayer._ +import fr.acinq.eclair.transactions.Transactions import fr.acinq.eclair.transactions.Transactions._ -import fr.acinq.eclair.transactions.{Scripts, Transactions} import fr.acinq.eclair.wire.protocol._ import fr.acinq.eclair.{BlockHeight, CltvExpiry, CltvExpiryDelta, Features, MilliSatoshiLong, TestConstants, TestKitBaseClass, TimestampSecond, randomBytes32, randomKey} import org.scalatest.Inside.inside @@ -94,12 +94,12 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with bob2blockchain.expectMsgType[WatchFundingConfirmed] awaitCond(alice.stateName == WAIT_FOR_FUNDING_CONFIRMED) awaitCond(bob.stateName == WAIT_FOR_FUNDING_CONFIRMED) - alice.underlying.system.eventStream.subscribe(txListener.ref, classOf[TransactionPublished]) - alice.underlying.system.eventStream.subscribe(txListener.ref, classOf[TransactionConfirmed]) - alice.underlying.system.eventStream.subscribe(eventListener.ref, classOf[ChannelAborted]) - bob.underlying.system.eventStream.subscribe(txListener.ref, classOf[TransactionPublished]) - bob.underlying.system.eventStream.subscribe(txListener.ref, classOf[TransactionConfirmed]) - bob.underlying.system.eventStream.subscribe(eventListener.ref, classOf[ChannelAborted]) + systemA.eventStream.subscribe(txListener.ref, classOf[TransactionPublished]) + systemA.eventStream.subscribe(txListener.ref, classOf[TransactionConfirmed]) + systemA.eventStream.subscribe(eventListener.ref, classOf[ChannelAborted]) + systemB.eventStream.subscribe(txListener.ref, classOf[TransactionPublished]) + systemB.eventStream.subscribe(txListener.ref, classOf[TransactionConfirmed]) + systemB.eventStream.subscribe(eventListener.ref, classOf[ChannelAborted]) withFixture(test.toNoArgTest(FixtureParam(alice, bob, alice2bob, bob2alice, alice2blockchain, bob2blockchain, alice2relayer, bob2relayer, 
channelUpdateListener, txListener, eventListener, Nil))) } } else { @@ -113,10 +113,10 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with bob2alice.forward(alice) bob2alice.expectMsgType[ChannelUpdate] } - alice.underlying.system.eventStream.subscribe(txListener.ref, classOf[TransactionPublished]) - alice.underlying.system.eventStream.subscribe(txListener.ref, classOf[TransactionConfirmed]) - bob.underlying.system.eventStream.subscribe(txListener.ref, classOf[TransactionPublished]) - bob.underlying.system.eventStream.subscribe(txListener.ref, classOf[TransactionConfirmed]) + systemA.eventStream.subscribe(txListener.ref, classOf[TransactionPublished]) + systemA.eventStream.subscribe(txListener.ref, classOf[TransactionConfirmed]) + systemB.eventStream.subscribe(txListener.ref, classOf[TransactionPublished]) + systemB.eventStream.subscribe(txListener.ref, classOf[TransactionConfirmed]) val bobCommitTxs: List[CommitTxAndRemoteSig] = (for (amt <- List(100000000 msat, 200000000 msat, 300000000 msat)) yield { val (r, htlc) = addHtlc(amt, alice, bob, alice2bob, bob2alice) crossSign(alice, bob, alice2bob, bob2alice) @@ -127,7 +127,7 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.Fulfill]] crossSign(bob, alice, bob2alice, alice2bob) // bob confirms that it has forwarded the fulfill to alice - awaitCond(bob.underlyingActor.nodeParams.db.pendingCommands.listSettlementCommands(htlc.channelId).isEmpty) + awaitCond(bob.nodeParams.db.pendingCommands.listSettlementCommands(htlc.channelId).isEmpty) val bobCommitTx2 = bob.stateData.asInstanceOf[DATA_NORMAL].commitments.latest.localCommit.commitTxAndRemoteSig bobCommitTx1 :: bobCommitTx2 :: Nil }).flatten @@ -184,8 +184,8 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with // test starts here alice ! 
GetTxWithMetaResponse(fundingTx.txid, Some(fundingTx), TimestampSecond.now()) - alice2bob.expectNoMessage(200 millis) - alice2blockchain.expectNoMessage(200 millis) + alice2bob.expectNoMessage(100 millis) + alice2blockchain.expectNoMessage(100 millis) assert(alice.stateName == CLOSING) // the above expectNoMsg will make us wait, so this checks that we are still in CLOSING } @@ -204,7 +204,7 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with // test starts here alice ! GetTxWithMetaResponse(fundingTx.txid, None, TimestampSecond.now()) - alice2bob.expectNoMessage(200 millis) + alice2bob.expectNoMessage(100 millis) assert(alice2blockchain.expectMsgType[PublishFinalTx].tx == fundingTx) // we republish the funding tx assert(alice.stateName == CLOSING) // the above expectNoMsg will make us wait, so this checks that we are still in CLOSING } @@ -224,8 +224,8 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with // test starts here bob ! GetTxWithMetaResponse(fundingTx.txid, Some(fundingTx), TimestampSecond.now()) - bob2alice.expectNoMessage(200 millis) - bob2blockchain.expectNoMessage(200 millis) + bob2alice.expectNoMessage(100 millis) + bob2blockchain.expectNoMessage(100 millis) assert(bob.stateName == CLOSING) // the above expectNoMsg will make us wait, so this checks that we are still in CLOSING } @@ -244,8 +244,8 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with // test starts here bob ! 
GetTxWithMetaResponse(fundingTx.txid, None, TimestampSecond.now()) - bob2alice.expectNoMessage(200 millis) - bob2blockchain.expectNoMessage(200 millis) + bob2alice.expectNoMessage(100 millis) + bob2blockchain.expectNoMessage(100 millis) assert(bob.stateName == CLOSING) // the above expectNoMsg will make us wait, so this checks that we are still in CLOSING } @@ -263,10 +263,10 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with eventListener.expectMsgType[ChannelAborted] // test starts here - bob.setState(stateData = bob.stateData.asInstanceOf[DATA_CLOSING].copy(waitingSince = bob.underlyingActor.nodeParams.currentBlockHeight - Channel.FUNDING_TIMEOUT_FUNDEE - 1)) + bob.setState(stateData = bob.stateData.asInstanceOf[DATA_CLOSING].copy(waitingSince = bob.nodeParams.currentBlockHeight - Channel.FUNDING_TIMEOUT_FUNDEE - 1)) bob ! GetTxWithMetaResponse(fundingTx.txid, None, TimestampSecond.now()) bob2alice.expectMsgType[Error] - bob2blockchain.expectNoMessage(200 millis) + bob2blockchain.expectNoMessage(100 millis) assert(bob.stateName == CLOSED) } @@ -280,7 +280,7 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with alice ! add val error = ChannelUnavailable(channelId(alice)) sender.expectMsg(RES_ADD_FAILED(add, error, None)) - alice2bob.expectNoMessage(200 millis) + alice2bob.expectNoMessage(100 millis) } test("recv CMD_FULFILL_HTLC (unexisting htlc)") { f => @@ -382,34 +382,278 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with inside(listener.expectMsgType[LocalChannelUpdate]) { u => assert(!u.channelUpdate.channelFlags.isEnabled) } } - test("recv WatchOutputSpentTriggered") { f => + private def extractPreimageFromClaimHtlcSuccess(f: FixtureParam): Unit = { import f._ - // alice sends an htlc to bob - val (ra1, htlca1) = addHtlc(50000000 msat, alice, bob, alice2bob, bob2alice) + + // Alice sends htlcs to Bob with the same payment_hash. 
+ val (preimage, htlc1) = addHtlc(50_000_000 msat, alice, bob, alice2bob, bob2alice) + val htlc2 = addHtlc(makeCmdAdd(40_000_000 msat, bob.nodeParams.nodeId, alice.nodeParams.currentBlockHeight, preimage)._2, alice, bob, alice2bob, bob2alice) + assert(htlc1.paymentHash == htlc2.paymentHash) crossSign(alice, bob, alice2bob, bob2alice) - bob2relayer.expectMsgType[RelayForward] - localClose(alice, alice2blockchain) + + // Bob has the preimage for those HTLCs, but Alice force-closes before receiving it. + bob ! CMD_FULFILL_HTLC(htlc1.id, preimage) + bob2alice.expectMsgType[UpdateFulfillHtlc] // ignored + val lcp = localClose(alice, alice2blockchain) val initialState = alice.stateData.asInstanceOf[DATA_CLOSING] - assert(initialState.localCommitPublished.isDefined) + assert(initialState.localCommitPublished.contains(lcp)) - // actual test starts here - channelUpdateListener.expectMsgType[LocalChannelDown] + // Bob claims the htlc output from Alice's commit tx using its preimage. + bob ! WatchFundingSpentTriggered(lcp.commitTx) + if (initialState.commitments.params.channelFeatures.hasFeature(Features.AnchorOutputsZeroFeeHtlcTx)) { + assert(bob2blockchain.expectMsgType[PublishReplaceableTx].txInfo.isInstanceOf[ClaimLocalAnchorOutputTx]) + bob2blockchain.expectMsgType[PublishFinalTx] // main-delayed + } + val claimHtlcSuccessTx1 = bob2blockchain.expectMsgType[PublishReplaceableTx] + assert(claimHtlcSuccessTx1.txInfo.isInstanceOf[ClaimHtlcSuccessTx]) + val claimHtlcSuccessTx2 = bob2blockchain.expectMsgType[PublishReplaceableTx] + assert(claimHtlcSuccessTx2.txInfo.isInstanceOf[ClaimHtlcSuccessTx]) + assert(claimHtlcSuccessTx1.input != claimHtlcSuccessTx2.input) + + // Alice extracts the preimage and forwards it upstream. + alice ! 
WatchOutputSpentTriggered(htlc1.amountMsat.truncateToSatoshi, claimHtlcSuccessTx1.txInfo.tx) + Seq(htlc1, htlc2).foreach(htlc => inside(alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFulfill]]) { fulfill => + assert(fulfill.htlc == htlc) + assert(fulfill.result.paymentPreimage == preimage) + assert(fulfill.origin == alice.stateData.asInstanceOf[DATA_CLOSING].commitments.originChannels(htlc.id)) + }) + assert(alice.stateData == initialState) // this was a no-op - // scenario 1: bob claims the htlc output from the commit tx using its preimage - val claimHtlcSuccessFromCommitTx = Transaction(version = 2, txIn = TxIn(outPoint = OutPoint(randomTxId(), 0), signatureScript = ByteVector.empty, sequence = 0, witness = Scripts.witnessClaimHtlcSuccessFromCommitTx(Transactions.PlaceHolderSig, ra1, ByteVector.fill(130)(33))) :: Nil, txOut = Nil, lockTime = 0) - alice ! WatchOutputSpentTriggered(100_000 sat, claimHtlcSuccessFromCommitTx) - val fulfill1 = alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFulfill]] - assert(fulfill1.htlc == htlca1) - assert(fulfill1.result.paymentPreimage == ra1) + // The Claim-HTLC-success transaction confirms: nothing to do, preimage has already been relayed. + assert(alice2blockchain.expectMsgType[WatchTxConfirmed].txId == claimHtlcSuccessTx1.txInfo.tx.txid) + alice ! 
WatchTxConfirmedTriggered(alice.nodeParams.currentBlockHeight, 6, claimHtlcSuccessTx1.txInfo.tx) + alice2blockchain.expectNoMessage(100 millis) + alice2relayer.expectNoMessage(100 millis) + } - // scenario 2: bob claims the htlc output from his own commit tx using its preimage (let's assume both parties had published their commitment tx) - val claimHtlcSuccessTx = Transaction(version = 2, txIn = TxIn(outPoint = OutPoint(randomTxId(), 0), signatureScript = ByteVector.empty, sequence = 0, witness = Scripts.witnessHtlcSuccess(Transactions.PlaceHolderSig, Transactions.PlaceHolderSig, ra1, ByteVector.fill(130)(33), Transactions.DefaultCommitmentFormat)) :: Nil, txOut = Nil, lockTime = 0) - alice ! WatchOutputSpentTriggered(100_000 sat, claimHtlcSuccessTx) - val fulfill2 = alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFulfill]] - assert(fulfill2.htlc == htlca1) - assert(fulfill2.result.paymentPreimage == ra1) + test("recv WatchOutputSpentTriggered (extract preimage from Claim-HTLC-success tx)") { f => + extractPreimageFromClaimHtlcSuccess(f) + } - assert(alice.stateData == initialState) // this was a no-op + test("recv WatchOutputSpentTriggered (extract preimage from Claim-HTLC-success tx, anchor outputs)", Tag(ChannelStateTestsTags.AnchorOutputsZeroFeeHtlcTxs)) { f => + extractPreimageFromClaimHtlcSuccess(f) + } + + private def extractPreimageFromHtlcSuccess(f: FixtureParam): Unit = { + import f._ + + // Alice sends htlcs to Bob with the same payment_hash. + val (preimage, htlc1) = addHtlc(50_000_000 msat, alice, bob, alice2bob, bob2alice) + val htlc2 = addHtlc(makeCmdAdd(40_000_000 msat, bob.nodeParams.nodeId, alice.nodeParams.currentBlockHeight, preimage)._2, alice, bob, alice2bob, bob2alice) + assert(htlc1.paymentHash == htlc2.paymentHash) + crossSign(alice, bob, alice2bob, bob2alice) + + // Bob has the preimage for those HTLCs, but he force-closes before Alice receives it. + bob ! 
CMD_FULFILL_HTLC(htlc1.id, preimage) + bob2alice.expectMsgType[UpdateFulfillHtlc] // ignored + val rcp = localClose(bob, bob2blockchain) + + // Bob claims the htlc outputs from his own commit tx using its preimage. + assert(rcp.htlcTxs.size == 2) + rcp.htlcTxs.values.foreach(tx_opt => assert(tx_opt.nonEmpty)) + val htlcSuccessTxs = rcp.htlcTxs.values.flatten + htlcSuccessTxs.foreach(tx => assert(tx.isInstanceOf[HtlcSuccessTx])) + + // Alice extracts the preimage and forwards it upstream. + alice ! WatchFundingSpentTriggered(rcp.commitTx) + alice ! WatchOutputSpentTriggered(htlc1.amountMsat.truncateToSatoshi, htlcSuccessTxs.head.tx) + Seq(htlc1, htlc2).foreach(htlc => inside(alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFulfill]]) { fulfill => + assert(fulfill.htlc == htlc) + assert(fulfill.result.paymentPreimage == preimage) + assert(fulfill.origin == alice.stateData.asInstanceOf[DATA_CLOSING].commitments.originChannels(htlc.id)) + }) + + // The HTLC-success transaction confirms: nothing to do, preimage has already been relayed. + alice ! WatchTxConfirmedTriggered(alice.nodeParams.currentBlockHeight, 6, htlcSuccessTxs.head.tx) + alice2relayer.expectNoMessage(100 millis) + } + + test("recv WatchOutputSpentTriggered (extract preimage from HTLC-success tx)") { f => + extractPreimageFromHtlcSuccess(f) + } + + test("recv WatchOutputSpentTriggered (extract preimage from HTLC-success tx, anchor outputs)", Tag(ChannelStateTestsTags.AnchorOutputsZeroFeeHtlcTxs)) { f => + extractPreimageFromHtlcSuccess(f) + } + + private def extractPreimageFromRemovedHtlc(f: FixtureParam): Unit = { + import f._ + + // Alice sends htlcs to Bob with the same payment_hash. 
+ val (preimage, htlc1) = addHtlc(50_000_000 msat, alice, bob, alice2bob, bob2alice) + val htlc2 = addHtlc(makeCmdAdd(40_000_000 msat, bob.nodeParams.nodeId, alice.nodeParams.currentBlockHeight, preimage)._2, alice, bob, alice2bob, bob2alice) + assert(htlc1.paymentHash == htlc2.paymentHash) + val (_, htlc3) = addHtlc(60_000_000 msat, alice, bob, alice2bob, bob2alice) + crossSign(alice, bob, alice2bob, bob2alice) + + val bobStateWithHtlc = bob.stateData.asInstanceOf[DATA_NORMAL] + + // Bob has the preimage for the first two HTLCs, but he fails them instead of fulfilling them. + failHtlc(htlc1.id, bob, alice, bob2alice, alice2bob) + failHtlc(htlc2.id, bob, alice, bob2alice, alice2bob) + failHtlc(htlc3.id, bob, alice, bob2alice, alice2bob) + bob ! CMD_SIGN() + bob2alice.expectMsgType[CommitSig] + bob2alice.forward(alice) + alice2bob.expectMsgType[RevokeAndAck] // stop here + alice2bob.expectMsgType[CommitSig] + + // At that point, the HTLCs are not in Alice's commitment anymore. + // But Bob has not yet revoked his commitment, which still contains them. + bob.setState(NORMAL, bobStateWithHtlc) + bob ! CMD_FULFILL_HTLC(htlc1.id, preimage) + bob2alice.expectMsgType[UpdateFulfillHtlc] // ignored + + // Bob claims the htlc outputs from his previous commit tx using its preimage. + val rcp = localClose(bob, bob2blockchain) + assert(rcp.htlcTxs.size == 3) + val htlcSuccessTxs = rcp.htlcTxs.values.flatten + assert(htlcSuccessTxs.size == 2) // Bob doesn't have the preimage for the last HTLC. + htlcSuccessTxs.foreach(tx => assert(tx.isInstanceOf[HtlcSuccessTx])) + + // Alice prepares Claim-HTLC-timeout transactions for each HTLC. + alice ! 
WatchFundingSpentTriggered(rcp.commitTx) + if (alice.stateData.asInstanceOf[DATA_CLOSING].commitments.params.channelFeatures.hasFeature(Features.AnchorOutputsZeroFeeHtlcTx)) { + assert(alice2blockchain.expectMsgType[PublishReplaceableTx].txInfo.isInstanceOf[ClaimLocalAnchorOutputTx]) + assert(alice2blockchain.expectMsgType[PublishFinalTx].desc == "remote-main-delayed") + } + Seq(htlc1, htlc2, htlc3).foreach(_ => assert(alice2blockchain.expectMsgType[PublishReplaceableTx].txInfo.isInstanceOf[ClaimHtlcTimeoutTx])) + val claimHtlcTimeoutTxs = getClaimHtlcTimeoutTxs(alice.stateData.asInstanceOf[DATA_CLOSING].remoteCommitPublished.get) + assert(claimHtlcTimeoutTxs.size == 3) + assert(alice2blockchain.expectMsgType[WatchTxConfirmed].txId == rcp.commitTx.txid) + if (alice.stateData.asInstanceOf[DATA_CLOSING].commitments.params.channelFeatures.hasFeature(Features.AnchorOutputsZeroFeeHtlcTx)) { + alice2blockchain.expectMsgType[WatchTxConfirmed] // remote-main-delayed + } + assert(Set( + alice2blockchain.expectMsgType[WatchOutputSpent].outputIndex, + alice2blockchain.expectMsgType[WatchOutputSpent].outputIndex, + alice2blockchain.expectMsgType[WatchOutputSpent].outputIndex, + ) == claimHtlcTimeoutTxs.map(_.input.outPoint.index).toSet) + alice2blockchain.expectNoMessage(100 millis) + + // Bob's commitment confirms. + alice ! WatchTxConfirmedTriggered(alice.nodeParams.currentBlockHeight, 3, rcp.commitTx) + alice2blockchain.expectNoMessage(100 millis) + alice2relayer.expectNoMessage(100 millis) + + // Alice extracts the preimage from Bob's HTLC-success and forwards it upstream. + alice ! 
WatchOutputSpentTriggered(htlc1.amountMsat.truncateToSatoshi, htlcSuccessTxs.head.tx) + Seq(htlc1, htlc2).foreach(htlc => inside(alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFulfill]]) { fulfill => + assert(fulfill.htlc == htlc) + assert(fulfill.result.paymentPreimage == preimage) + assert(fulfill.origin == alice.stateData.asInstanceOf[DATA_CLOSING].commitments.originChannels(htlc.id)) + }) + alice2relayer.expectNoMessage(100 millis) + + // The HTLC-success transaction confirms: nothing to do, preimage has already been relayed. + alice ! WatchTxConfirmedTriggered(alice.nodeParams.currentBlockHeight, 6, htlcSuccessTxs.head.tx) + alice2relayer.expectNoMessage(100 millis) + + // Alice's Claim-HTLC-timeout transaction confirms: we relay the failure upstream. + val claimHtlcTimeout = claimHtlcTimeoutTxs.find(_.htlcId == htlc3.id).get + alice ! WatchTxConfirmedTriggered(alice.nodeParams.currentBlockHeight, 13, claimHtlcTimeout.tx) + inside(alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFail]]) { fail => + assert(fail.htlc == htlc3) + assert(fail.origin == alice.stateData.asInstanceOf[DATA_CLOSING].commitments.originChannels(htlc3.id)) + } + } + + test("recv WatchOutputSpentTriggered (extract preimage for removed HTLC)") { f => + extractPreimageFromRemovedHtlc(f) + } + + test("recv WatchOutputSpentTriggered (extract preimage for removed HTLC, anchor outputs)", Tag(ChannelStateTestsTags.AnchorOutputsZeroFeeHtlcTxs)) { f => + extractPreimageFromRemovedHtlc(f) + } + + private def extractPreimageFromNextHtlcs(f: FixtureParam): Unit = { + import f._ + + // Alice sends htlcs to Bob with the same payment_hash. 
+ val (preimage, htlc1) = addHtlc(50_000_000 msat, alice, bob, alice2bob, bob2alice) + val htlc2 = addHtlc(makeCmdAdd(40_000_000 msat, bob.nodeParams.nodeId, alice.nodeParams.currentBlockHeight, preimage)._2, alice, bob, alice2bob, bob2alice) + assert(htlc1.paymentHash == htlc2.paymentHash) + val (_, htlc3) = addHtlc(60_000_000 msat, alice, bob, alice2bob, bob2alice) + alice ! CMD_SIGN() + alice2bob.expectMsgType[CommitSig] + alice2bob.forward(bob) + // We want to test what happens when we stop at that point. + // But for Bob to create HTLC transactions, he must have received Alice's revocation. + // So for the sake of the test, we exchange revocations and then reset Alice's state. + val aliceStateWithoutHtlcs = alice.stateData.asInstanceOf[DATA_NORMAL] + bob2alice.expectMsgType[RevokeAndAck] + bob2alice.forward(alice) + bob2alice.expectMsgType[CommitSig] + bob2alice.forward(alice) + alice2bob.expectMsgType[RevokeAndAck] + alice2bob.forward(bob) + alice.setState(NORMAL, aliceStateWithoutHtlcs) + + // At that point, the HTLCs are not in Alice's commitment yet. + val rcp = localClose(bob, bob2blockchain) + assert(rcp.htlcTxs.size == 3) + // Bob doesn't have the preimage yet for any of those HTLCs. + rcp.htlcTxs.values.foreach(tx_opt => assert(tx_opt.isEmpty)) + // Bob receives the preimage for the first two HTLCs. + bob ! CMD_FULFILL_HTLC(htlc1.id, preimage) + awaitCond(bob.stateData.asInstanceOf[DATA_CLOSING].localCommitPublished.get.htlcTxs.values.exists(_.nonEmpty)) + val htlcSuccessTxs = bob.stateData.asInstanceOf[DATA_CLOSING].localCommitPublished.get.htlcTxs.values.flatten.filter(_.isInstanceOf[HtlcSuccessTx]).toSeq + assert(htlcSuccessTxs.map(_.htlcId).toSet == Set(htlc1.id, htlc2.id)) + val batchHtlcSuccessTx = Transaction(2, htlcSuccessTxs.flatMap(_.tx.txIn), htlcSuccessTxs.flatMap(_.tx.txOut), 0) + + // Alice prepares Claim-HTLC-timeout transactions for each HTLC. + alice ! 
WatchFundingSpentTriggered(rcp.commitTx) + if (alice.stateData.asInstanceOf[DATA_CLOSING].commitments.params.channelFeatures.hasFeature(Features.AnchorOutputsZeroFeeHtlcTx)) { + assert(alice2blockchain.expectMsgType[PublishReplaceableTx].txInfo.isInstanceOf[ClaimLocalAnchorOutputTx]) + assert(alice2blockchain.expectMsgType[PublishFinalTx].desc == "remote-main-delayed") + } + Seq(htlc1, htlc2, htlc3).foreach(_ => assert(alice2blockchain.expectMsgType[PublishReplaceableTx].txInfo.isInstanceOf[ClaimHtlcTimeoutTx])) + val claimHtlcTimeoutTxs = getClaimHtlcTimeoutTxs(alice.stateData.asInstanceOf[DATA_CLOSING].nextRemoteCommitPublished.get) + assert(claimHtlcTimeoutTxs.size == 3) + assert(alice2blockchain.expectMsgType[WatchTxConfirmed].txId == rcp.commitTx.txid) + if (alice.stateData.asInstanceOf[DATA_CLOSING].commitments.params.channelFeatures.hasFeature(Features.AnchorOutputsZeroFeeHtlcTx)) { + alice2blockchain.expectMsgType[WatchTxConfirmed] // remote-main-delayed + } + assert(Set( + alice2blockchain.expectMsgType[WatchOutputSpent].outputIndex, + alice2blockchain.expectMsgType[WatchOutputSpent].outputIndex, + alice2blockchain.expectMsgType[WatchOutputSpent].outputIndex, + ) == claimHtlcTimeoutTxs.map(_.input.outPoint.index).toSet) + alice2blockchain.expectNoMessage(100 millis) + + // Bob's commitment confirms. + alice ! WatchTxConfirmedTriggered(alice.nodeParams.currentBlockHeight, 3, rcp.commitTx) + alice2blockchain.expectNoMessage(100 millis) + alice2relayer.expectNoMessage(100 millis) + + // Alice extracts the preimage from Bob's batched HTLC-success and forwards it upstream. + alice ! 
WatchOutputSpentTriggered(htlc1.amountMsat.truncateToSatoshi, batchHtlcSuccessTx) + Seq(htlc1, htlc2).foreach(htlc => inside(alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFulfill]]) { fulfill => + assert(fulfill.htlc == htlc) + assert(fulfill.result.paymentPreimage == preimage) + assert(fulfill.origin == alice.stateData.asInstanceOf[DATA_CLOSING].commitments.originChannels(htlc.id)) + }) + alice2relayer.expectNoMessage(100 millis) + + // The HTLC-success transaction confirms: nothing to do, preimage has already been relayed. + alice ! WatchTxConfirmedTriggered(alice.nodeParams.currentBlockHeight, 6, batchHtlcSuccessTx) + alice2relayer.expectNoMessage(100 millis) + + // Alice's Claim-HTLC-timeout transaction confirms: we relay the failure upstream. + val claimHtlcTimeout = claimHtlcTimeoutTxs.find(_.htlcId == htlc3.id).get + alice ! WatchTxConfirmedTriggered(alice.nodeParams.currentBlockHeight, 13, claimHtlcTimeout.tx) + inside(alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFail]]) { fail => + assert(fail.htlc == htlc3) + assert(fail.origin == alice.stateData.asInstanceOf[DATA_CLOSING].commitments.originChannels(htlc3.id)) + } + } + + test("recv WatchOutputSpentTriggered (extract preimage for next batch of HTLCs)") { f => + extractPreimageFromNextHtlcs(f) + } + + test("recv WatchOutputSpentTriggered (extract preimage for next batch of HTLCs, anchor outputs)", Tag(ChannelStateTestsTags.AnchorOutputsZeroFeeHtlcTxs)) { f => + extractPreimageFromNextHtlcs(f) } test("recv CMD_BUMP_FORCE_CLOSE_FEE (local commit)", Tag(ChannelStateTestsTags.AnchorOutputsZeroFeeHtlcTxs)) { f => @@ -441,7 +685,7 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with val localAnchor3 = inside(alice2blockchain.expectMsgType[PublishReplaceableTx]) { tx => assert(tx.txInfo.isInstanceOf[ClaimLocalAnchorOutputTx]) assert(tx.commitTx == localCommitPublished1.commitTx) - tx.txInfo.asInstanceOf[ClaimLocalAnchorOutputTx] + 
tx.txInfo.asInstanceOf[ClaimLocalAnchorOutputTx] } assert(localAnchor3.confirmationTarget == ConfirmationTarget.Priority(ConfirmationPriority.Fast)) assert(alice.stateData.asInstanceOf[DATA_CLOSING].localCommitPublished.contains(localCommitPublished2)) @@ -453,11 +697,11 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with assert(alice.stateData.asInstanceOf[DATA_NORMAL].commitments.params.channelFeatures == channelFeatures) val listener = TestProbe() - alice.underlying.system.eventStream.subscribe(listener.ref, classOf[LocalCommitConfirmed]) - alice.underlying.system.eventStream.subscribe(listener.ref, classOf[PaymentSettlingOnChain]) + systemA.eventStream.subscribe(listener.ref, classOf[LocalCommitConfirmed]) + systemA.eventStream.subscribe(listener.ref, classOf[PaymentSettlingOnChain]) // alice sends an htlc to bob - val (_, htlca1) = addHtlc(50000000 msat, alice, bob, alice2bob, bob2alice) + val (_, htlca1) = addHtlc(50_000_000 msat, alice, bob, alice2bob, bob2alice) // alice sends an htlc below dust to bob val amountBelowDust = alice.stateData.asInstanceOf[DATA_NORMAL].commitments.params.localParams.dustLimit - 100.msat val (_, htlca2) = addHtlc(amountBelowDust, alice, bob, alice2bob, bob2alice) @@ -470,18 +714,24 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with assert(getHtlcSuccessTxs(closingState).isEmpty) assert(getHtlcTimeoutTxs(closingState).length == 1) val htlcTimeoutTx = getHtlcTimeoutTxs(closingState).head.tx - assert(closingState.claimHtlcDelayedTxs.length == 0) + assert(closingState.claimHtlcDelayedTxs.isEmpty) alice ! 
WatchTxConfirmedTriggered(BlockHeight(42), 0, closingState.commitTx) assert(txListener.expectMsgType[TransactionConfirmed].tx == closingState.commitTx) assert(listener.expectMsgType[LocalCommitConfirmed].refundAtBlock == BlockHeight(42) + bob.stateData.asInstanceOf[DATA_NORMAL].commitments.params.localParams.toSelfDelay.toInt) assert(listener.expectMsgType[PaymentSettlingOnChain].paymentHash == htlca1.paymentHash) // htlcs below dust will never reach the chain, once the commit tx is confirmed we can consider them failed - assert(alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFail]].htlc == htlca2) + inside(alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFail]]) { settled => + assert(settled.htlc == htlca2) + assert(settled.origin == alice.stateData.asInstanceOf[DATA_CLOSING].commitments.originChannels(htlca2.id)) + } alice2relayer.expectNoMessage(100 millis) alice ! WatchTxConfirmedTriggered(BlockHeight(200), 0, closingState.claimMainDelayedOutputTx.get.tx) alice ! WatchTxConfirmedTriggered(BlockHeight(201), 0, htlcTimeoutTx) assert(alice.stateData.asInstanceOf[DATA_CLOSING].localCommitPublished.get.irrevocablySpent.values.toSet == Set(closingState.commitTx, closingState.claimMainDelayedOutputTx.get.tx, htlcTimeoutTx)) - assert(alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFail]].htlc == htlca1) + inside(alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFail]]) { settled => + assert(settled.htlc == htlca1) + assert(settled.origin == alice.stateData.asInstanceOf[DATA_CLOSING].commitments.originChannels(htlca1.id)) + } alice2relayer.expectNoMessage(100 millis) // We claim the htlc-delayed output now that the HTLC tx has been confirmed. 
@@ -505,16 +755,16 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with import f._ // alice sends a first htlc to bob - val (ra1, htlca1) = addHtlc(30000000 msat, alice, bob, alice2bob, bob2alice) + val (ra1, htlca1) = addHtlc(30_000_000 msat, alice, bob, alice2bob, bob2alice) // and more htlcs with the same payment_hash - val (_, cmd2) = makeCmdAdd(25000000 msat, bob.underlyingActor.nodeParams.nodeId, alice.underlyingActor.nodeParams.currentBlockHeight, ra1) + val (_, cmd2) = makeCmdAdd(25_000_000 msat, bob.nodeParams.nodeId, alice.nodeParams.currentBlockHeight, ra1) val htlca2 = addHtlc(cmd2, alice, bob, alice2bob, bob2alice) - val (_, cmd3) = makeCmdAdd(30000000 msat, bob.underlyingActor.nodeParams.nodeId, alice.underlyingActor.nodeParams.currentBlockHeight, ra1) + val (_, cmd3) = makeCmdAdd(30_000_000 msat, bob.nodeParams.nodeId, alice.nodeParams.currentBlockHeight, ra1) val htlca3 = addHtlc(cmd3, alice, bob, alice2bob, bob2alice) val amountBelowDust = alice.stateData.asInstanceOf[DATA_NORMAL].commitments.params.localParams.dustLimit - 100.msat - val (_, dustCmd) = makeCmdAdd(amountBelowDust, bob.underlyingActor.nodeParams.nodeId, alice.underlyingActor.nodeParams.currentBlockHeight, ra1) + val (_, dustCmd) = makeCmdAdd(amountBelowDust, bob.nodeParams.nodeId, alice.nodeParams.currentBlockHeight, ra1) val dust = addHtlc(dustCmd, alice, bob, alice2bob, bob2alice) - val (_, cmd4) = makeCmdAdd(20000000 msat, bob.underlyingActor.nodeParams.nodeId, alice.underlyingActor.nodeParams.currentBlockHeight + 1, ra1) + val (_, cmd4) = makeCmdAdd(20_000_000 msat, bob.nodeParams.nodeId, alice.nodeParams.currentBlockHeight + 1, ra1) val htlca4 = addHtlc(cmd4, alice, bob, alice2bob, bob2alice) crossSign(alice, bob, alice2bob, bob2alice) val closingState = localClose(alice, alice2blockchain) @@ -525,26 +775,26 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with assert(getHtlcSuccessTxs(closingState).isEmpty) val 
htlcTimeoutTxs = getHtlcTimeoutTxs(closingState).map(_.tx) assert(htlcTimeoutTxs.length == 4) - assert(closingState.claimHtlcDelayedTxs.length == 0) + assert(closingState.claimHtlcDelayedTxs.isEmpty) // if commit tx and htlc-timeout txs end up in the same block, we may receive the htlc-timeout confirmation before the commit tx confirmation alice ! WatchTxConfirmedTriggered(BlockHeight(42), 0, htlcTimeoutTxs(0)) val forwardedFail1 = alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFail]].htlc - alice2relayer.expectNoMessage(250 millis) + alice2relayer.expectNoMessage(100 millis) alice ! WatchTxConfirmedTriggered(BlockHeight(42), 1, closingState.commitTx) assert(alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFail]].htlc == dust) - alice2relayer.expectNoMessage(250 millis) + alice2relayer.expectNoMessage(100 millis) alice ! WatchTxConfirmedTriggered(BlockHeight(200), 0, closingState.claimMainDelayedOutputTx.get.tx) alice ! WatchTxConfirmedTriggered(BlockHeight(202), 0, htlcTimeoutTxs(1)) val forwardedFail2 = alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFail]].htlc - alice2relayer.expectNoMessage(250 millis) + alice2relayer.expectNoMessage(100 millis) alice ! WatchTxConfirmedTriggered(BlockHeight(202), 1, htlcTimeoutTxs(2)) val forwardedFail3 = alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFail]].htlc - alice2relayer.expectNoMessage(250 millis) + alice2relayer.expectNoMessage(100 millis) alice ! 
WatchTxConfirmedTriggered(BlockHeight(203), 0, htlcTimeoutTxs(3)) val forwardedFail4 = alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFail]].htlc assert(Set(forwardedFail1, forwardedFail2, forwardedFail3, forwardedFail4) == Set(htlca1, htlca2, htlca3, htlca4)) - alice2relayer.expectNoMessage(250 millis) + alice2relayer.expectNoMessage(100 millis) awaitCond(alice.stateData.asInstanceOf[DATA_CLOSING].localCommitPublished.get.claimHtlcDelayedTxs.length == 4) val claimHtlcDelayedTxs = alice.stateData.asInstanceOf[DATA_CLOSING].localCommitPublished.get.claimHtlcDelayedTxs @@ -558,10 +808,10 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with test("recv WatchTxConfirmedTriggered (local commit with htlcs only signed by local)") { f => import f._ val listener = TestProbe() - alice.underlying.system.eventStream.subscribe(listener.ref, classOf[PaymentSettlingOnChain]) + systemA.eventStream.subscribe(listener.ref, classOf[PaymentSettlingOnChain]) val aliceCommitTx = alice.stateData.asInstanceOf[DATA_NORMAL].commitments.latest.localCommit.commitTxAndRemoteSig.commitTx.tx // alice sends an htlc - val (_, htlc) = addHtlc(4200000 msat, alice, bob, alice2bob, bob2alice) + val (_, htlc) = addHtlc(50_000_000 msat, alice, bob, alice2bob, bob2alice) // and signs it (but bob doesn't sign it) alice ! 
CMD_SIGN() alice2bob.expectMsgType[CommitSig] @@ -578,7 +828,7 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with val origin = alice.stateData.asInstanceOf[DATA_CLOSING].commitments.originChannels(htlc.id) alice2relayer.expectMsg(RES_ADD_SETTLED(origin, htlc, HtlcResult.OnChainFail(HtlcOverriddenByLocalCommit(channelId(alice), htlc)))) // the htlc will not settle on chain - listener.expectNoMessage(2 seconds) + listener.expectNoMessage(100 millis) alice2relayer.expectNoMessage(100 millis) } @@ -590,6 +840,7 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with bob2alice.expectMsgType[CommitSig] bob2alice.forward(alice) alice2bob.expectMsgType[RevokeAndAck] + alice2relayer.expectNoMessage(100 millis) // the HTLC is not relayed downstream assert(alice.stateData.asInstanceOf[DATA_NORMAL].commitments.latest.localCommit.htlcTxsAndRemoteSigs.size == 1) val aliceCommitTx = alice.stateData.asInstanceOf[DATA_NORMAL].commitments.latest.localCommit.commitTxAndRemoteSig.commitTx.tx // Note that alice has not signed the htlc yet! @@ -602,15 +853,44 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with // Once the commit tx and her main output are confirmed, she can consider the channel closed. alice ! WatchTxConfirmedTriggered(BlockHeight(0), 0, aliceCommitTx) closingState.claimMainDelayedOutputTx.foreach(claimMain => alice ! WatchTxConfirmedTriggered(BlockHeight(0), 0, claimMain.tx)) + alice2relayer.expectNoMessage(100 millis) + awaitCond(alice.stateName == CLOSED) + } + + test("recv WatchTxConfirmedTriggered (remote commit with htlcs not relayed)") { f => + import f._ + // Bob sends an htlc and signs it. + addHtlc(75_000_000 msat, bob, alice, bob2alice, alice2bob) + bob ! 
CMD_SIGN() + bob2alice.expectMsgType[CommitSig] + bob2alice.forward(alice) + alice2bob.expectMsgType[RevokeAndAck] + alice2bob.forward(bob) + alice2bob.expectMsgType[CommitSig] + alice2bob.forward(bob) + bob2alice.expectMsgType[RevokeAndAck] // not received by Alice + alice2relayer.expectNoMessage(100 millis) // the HTLC is not relayed downstream + assert(bob.stateData.asInstanceOf[DATA_NORMAL].commitments.latest.localCommit.htlcTxsAndRemoteSigs.size == 1) + val bobCommitTx = bob.stateData.asInstanceOf[DATA_NORMAL].commitments.latest.localCommit.commitTxAndRemoteSig.commitTx.tx + // We make Bob unilaterally close the channel. + val rcp = remoteClose(bobCommitTx, alice, alice2blockchain) + + channelUpdateListener.expectMsgType[LocalChannelDown] + assert(rcp.claimHtlcTxs.isEmpty) + // Alice should ignore the htlc (she hasn't relayed it yet): it is Bob's responsibility to claim it. + // Once the commit tx and her main output are confirmed, she can consider the channel closed. + alice ! WatchTxConfirmedTriggered(BlockHeight(0), 0, bobCommitTx) + rcp.claimMainOutputTx.foreach(claimMain => alice ! WatchTxConfirmedTriggered(BlockHeight(0), 0, claimMain.tx)) + alice2relayer.expectNoMessage(100 millis) awaitCond(alice.stateName == CLOSED) } test("recv WatchTxConfirmedTriggered (local commit with fulfill only signed by local)") { f => import f._ // bob sends an htlc - val (r, htlc) = addHtlc(110000000 msat, bob, alice, bob2alice, alice2bob) + val (r, htlc) = addHtlc(110_000_000 msat, bob, alice, bob2alice, alice2bob) crossSign(bob, alice, bob2alice, alice2bob) - alice2relayer.expectMsgType[RelayForward] + assert(alice2relayer.expectMsgType[RelayForward].add == htlc) val aliceCommitTx = alice.stateData.asInstanceOf[DATA_NORMAL].commitments.latest.localCommit.commitTxAndRemoteSig.commitTx.tx assert(aliceCommitTx.txOut.size == 3) // 2 main outputs + 1 htlc @@ -618,6 +898,11 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with alice ! 
CMD_FULFILL_HTLC(htlc.id, r, commit = true) alice2bob.expectMsgType[UpdateFulfillHtlc] alice2bob.forward(bob) + inside(bob2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.Fulfill]]) { settled => + assert(settled.htlc == htlc) + assert(settled.result.paymentPreimage == r) + assert(settled.origin == bob.stateData.asInstanceOf[DATA_NORMAL].commitments.originChannels(htlc.id)) + } alice2bob.expectMsgType[CommitSig] // note that bob doesn't receive the new sig! // then we make alice unilaterally close the channel @@ -630,8 +915,8 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with test("recv WatchTxConfirmedTriggered (local commit with fail not acked by remote)") { f => import f._ val listener = TestProbe() - alice.underlying.system.eventStream.subscribe(listener.ref, classOf[PaymentSettlingOnChain]) - val (_, htlc) = addHtlc(25000000 msat, alice, bob, alice2bob, bob2alice) + systemA.eventStream.subscribe(listener.ref, classOf[PaymentSettlingOnChain]) + val (_, htlc) = addHtlc(25_000_000 msat, alice, bob, alice2bob, bob2alice) crossSign(alice, bob, alice2bob, bob2alice) failHtlc(htlc.id, bob, alice, bob2alice, alice2bob) bob ! 
CMD_SIGN() @@ -656,7 +941,7 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with val origin = alice.stateData.asInstanceOf[DATA_CLOSING].commitments.originChannels(htlc.id) alice2relayer.expectMsg(RES_ADD_SETTLED(origin, htlc, HtlcResult.OnChainFail(HtlcOverriddenByLocalCommit(channelId(alice), htlc)))) // the htlc will not settle on chain - listener.expectNoMessage(2 seconds) + listener.expectNoMessage(100 millis) alice2relayer.expectNoMessage(100 millis) } @@ -664,7 +949,7 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with import f._ // alice sends an htlc to bob - addHtlc(50000000 msat, alice, bob, alice2bob, bob2alice) + addHtlc(50_000_000 msat, alice, bob, alice2bob, bob2alice) crossSign(alice, bob, alice2bob, bob2alice) val closingState = localClose(alice, alice2blockchain) val htlcTimeoutTx = getHtlcTimeoutTxs(closingState).head @@ -672,7 +957,7 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with // simulate a node restart after a feerate increase val beforeRestart = alice.stateData.asInstanceOf[DATA_CLOSING] alice.setState(WAIT_FOR_INIT_INTERNAL, Nothing) - alice.underlyingActor.nodeParams.setBitcoinCoreFeerates(FeeratesPerKw.single(FeeratePerKw(15_000 sat))) + alice.nodeParams.setBitcoinCoreFeerates(FeeratesPerKw.single(FeeratePerKw(15_000 sat))) alice ! INPUT_RESTORED(beforeRestart) alice2blockchain.expectMsgType[SetChannelId] awaitCond(alice.stateName == CLOSING) @@ -760,7 +1045,7 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with // We simulate a node restart after a feerate increase. val beforeRestart = alice.stateData.asInstanceOf[DATA_CLOSING] - alice.underlyingActor.nodeParams.setBitcoinCoreFeerates(FeeratesPerKw.single(FeeratePerKw(15_000 sat))) + alice.nodeParams.setBitcoinCoreFeerates(FeeratesPerKw.single(FeeratePerKw(15_000 sat))) alice.setState(WAIT_FOR_INIT_INTERNAL, Nothing) alice ! 
INPUT_RESTORED(beforeRestart) alice2blockchain.expectMsgType[SetChannelId] @@ -788,10 +1073,10 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with test("recv WatchTxConfirmedTriggered (remote commit with htlcs only signed by local in next remote commit)") { f => import f._ val listener = TestProbe() - alice.underlying.system.eventStream.subscribe(listener.ref, classOf[PaymentSettlingOnChain]) + systemA.eventStream.subscribe(listener.ref, classOf[PaymentSettlingOnChain]) val bobCommitTx = bob.stateData.asInstanceOf[DATA_NORMAL].commitments.latest.localCommit.commitTxAndRemoteSig.commitTx.tx // alice sends an htlc - val (_, htlc) = addHtlc(4200000 msat, alice, bob, alice2bob, bob2alice) + val (_, htlc) = addHtlc(50_000_000 msat, alice, bob, alice2bob, bob2alice) // and signs it (but bob doesn't sign it) alice ! CMD_SIGN() alice2bob.expectMsgType[CommitSig] @@ -807,20 +1092,26 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with val origin = alice.stateData.asInstanceOf[DATA_CLOSING].commitments.originChannels(htlc.id) alice2relayer.expectMsg(RES_ADD_SETTLED(origin, htlc, HtlcResult.OnChainFail(HtlcOverriddenByLocalCommit(channelId(alice), htlc)))) // the htlc will not settle on chain - listener.expectNoMessage(2 seconds) + listener.expectNoMessage(100 millis) + alice2relayer.expectNoMessage(100 millis) } test("recv WatchTxConfirmedTriggered (next remote commit with settled htlcs)") { f => import f._ // alice sends two htlcs to bob - val (preimage1, htlc1) = addHtlc(10_000_000 msat, alice, bob, alice2bob, bob2alice) - val (_, htlc2) = addHtlc(10_000_000 msat, alice, bob, alice2bob, bob2alice) + val upstream1 = localUpstream() + val (preimage1, htlc1) = addHtlc(10_000_000 msat, alice, bob, alice2bob, bob2alice, upstream1) + val upstream2 = localUpstream() + val (_, htlc2) = addHtlc(10_000_000 msat, alice, bob, alice2bob, bob2alice, upstream2) crossSign(alice, bob, alice2bob, bob2alice) // bob fulfills one 
HTLC and fails the other one without revoking its previous commitment. fulfillHtlc(htlc1.id, preimage1, bob, alice, bob2alice, alice2bob) - assert(alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.RemoteFulfill]].htlc == htlc1) + inside(alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.RemoteFulfill]]) { settled => + assert(settled.htlc == htlc1) + assert(settled.origin.upstream == upstream1) + } failHtlc(htlc2.id, bob, alice, bob2alice, alice2bob) bob ! CMD_SIGN() bob2alice.expectMsgType[CommitSig] @@ -837,8 +1128,16 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with val commitTx = bobCommit.commitTxAndRemoteSig.commitTx.tx alice ! WatchFundingSpentTriggered(commitTx) alice ! WatchTxConfirmedTriggered(BlockHeight(42), 0, commitTx) - // alice propagates the HTLC failure upstream. - assert(alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFail]].htlc == htlc2) + // the two HTLCs have been overridden by the on-chain commit + // the first one is a no-op since we already relayed the fulfill upstream + inside(alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFail]]) { settled => + assert(settled.htlc == htlc1) + assert(settled.origin.upstream == upstream1) + } + inside(alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFail]]) { settled => + assert(settled.htlc == htlc2) + assert(settled.origin.upstream == upstream2) + } alice2relayer.expectNoMessage(100 millis) } @@ -979,11 +1278,11 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with assert(alice.stateData.asInstanceOf[DATA_NORMAL].commitments.params.channelFeatures == channelFeatures) // alice sends a first htlc to bob - val (ra1, htlca1) = addHtlc(15000000 msat, alice, bob, alice2bob, bob2alice) + val (ra1, htlca1) = addHtlc(15_000_000 msat, alice, bob, alice2bob, bob2alice) // alice sends more htlcs with the same payment_hash - val (_, cmd2) = makeCmdAdd(15000000 msat, 
bob.underlyingActor.nodeParams.nodeId, alice.underlyingActor.nodeParams.currentBlockHeight, ra1) + val (_, cmd2) = makeCmdAdd(15_000_000 msat, bob.nodeParams.nodeId, alice.nodeParams.currentBlockHeight, ra1) val htlca2 = addHtlc(cmd2, alice, bob, alice2bob, bob2alice) - val (_, cmd3) = makeCmdAdd(20000000 msat, bob.underlyingActor.nodeParams.nodeId, alice.underlyingActor.nodeParams.currentBlockHeight - 1, ra1) + val (_, cmd3) = makeCmdAdd(20_000_000 msat, bob.nodeParams.nodeId, alice.nodeParams.currentBlockHeight - 1, ra1) val htlca3 = addHtlc(cmd3, alice, bob, alice2bob, bob2alice) crossSign(alice, bob, alice2bob, bob2alice) @@ -1004,14 +1303,14 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with alice2relayer.expectNoMessage(100 millis) alice ! WatchTxConfirmedTriggered(BlockHeight(201), 0, claimHtlcTimeoutTxs(0)) val forwardedFail1 = alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFail]].htlc - alice2relayer.expectNoMessage(250 millis) + alice2relayer.expectNoMessage(100 millis) alice ! WatchTxConfirmedTriggered(BlockHeight(202), 0, claimHtlcTimeoutTxs(1)) val forwardedFail2 = alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFail]].htlc - alice2relayer.expectNoMessage(250 millis) + alice2relayer.expectNoMessage(100 millis) alice ! WatchTxConfirmedTriggered(BlockHeight(203), 1, claimHtlcTimeoutTxs(2)) val forwardedFail3 = alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFail]].htlc assert(Set(forwardedFail1, forwardedFail2, forwardedFail3) == Set(htlca1, htlca2, htlca3)) - alice2relayer.expectNoMessage(250 millis) + alice2relayer.expectNoMessage(100 millis) awaitCond(alice.stateName == CLOSED) } @@ -1030,12 +1329,12 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with test("recv WatchTxConfirmedTriggered (remote commit) followed by CMD_FULFILL_HTLC") { f => import f._ // An HTLC Bob -> Alice is cross-signed that will be fulfilled later. 
- val (r1, htlc1) = addHtlc(110000000 msat, CltvExpiryDelta(48), bob, alice, bob2alice, alice2bob) + val (r1, htlc1) = addHtlc(110_000_000 msat, CltvExpiryDelta(48), bob, alice, bob2alice, alice2bob) crossSign(bob, alice, bob2alice, alice2bob) - alice2relayer.expectMsgType[RelayForward] + assert(alice2relayer.expectMsgType[RelayForward].add == htlc1) // An HTLC Alice -> Bob is only signed by Alice: Bob has two spendable commit tx. - val (_, htlc2) = addHtlc(95000000 msat, CltvExpiryDelta(144), alice, bob, alice2bob, bob2alice) + val (_, htlc2) = addHtlc(95_000_000 msat, CltvExpiryDelta(144), alice, bob, alice2bob, bob2alice) alice ! CMD_SIGN() alice2bob.expectMsgType[CommitSig] // We stop here: Alice sent her CommitSig, but doesn't hear back from Bob. @@ -1107,13 +1406,13 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with assert(alice.stateData.asInstanceOf[DATA_NORMAL].commitments.params.channelFeatures == channelFeatures) // alice sends a first htlc to bob - val (ra1, htlca1) = addHtlc(15000000 msat, alice, bob, alice2bob, bob2alice) + val (ra1, htlca1) = addHtlc(15_000_000 msat, alice, bob, alice2bob, bob2alice) // alice sends more htlcs with the same payment_hash - val (_, cmd2) = makeCmdAdd(20000000 msat, bob.underlyingActor.nodeParams.nodeId, alice.underlyingActor.nodeParams.currentBlockHeight, ra1) + val (_, cmd2) = makeCmdAdd(20_000_000 msat, bob.nodeParams.nodeId, alice.nodeParams.currentBlockHeight, ra1) val htlca2 = addHtlc(cmd2, alice, bob, alice2bob, bob2alice) crossSign(alice, bob, alice2bob, bob2alice) // The last one is only signed by Alice: Bob has two spendable commit tx. - val (_, cmd3) = makeCmdAdd(20000000 msat, bob.underlyingActor.nodeParams.nodeId, alice.underlyingActor.nodeParams.currentBlockHeight, ra1) + val (_, cmd3) = makeCmdAdd(20_000_000 msat, bob.nodeParams.nodeId, alice.nodeParams.currentBlockHeight, ra1) val htlca3 = addHtlc(cmd3, alice, bob, alice2bob, bob2alice) alice ! 
CMD_SIGN() alice2bob.expectMsgType[CommitSig] @@ -1145,14 +1444,14 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with alice2relayer.expectNoMessage(100 millis) alice ! WatchTxConfirmedTriggered(BlockHeight(201), 0, claimHtlcTimeoutTxs(0)) val forwardedFail1 = alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFail]].htlc - alice2relayer.expectNoMessage(250 millis) + alice2relayer.expectNoMessage(100 millis) alice ! WatchTxConfirmedTriggered(BlockHeight(202), 0, claimHtlcTimeoutTxs(1)) val forwardedFail2 = alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFail]].htlc - alice2relayer.expectNoMessage(250 millis) + alice2relayer.expectNoMessage(100 millis) alice ! WatchTxConfirmedTriggered(BlockHeight(203), 1, claimHtlcTimeoutTxs(2)) val forwardedFail3 = alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFail]].htlc assert(Set(forwardedFail1, forwardedFail2, forwardedFail3) == htlcs) - alice2relayer.expectNoMessage(250 millis) + alice2relayer.expectNoMessage(100 millis) awaitCond(alice.stateName == CLOSED) } @@ -1165,14 +1464,14 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with alice2relayer.expectNoMessage(100 millis) alice ! WatchTxConfirmedTriggered(BlockHeight(201), 0, claimHtlcTimeoutTxs(0)) val forwardedFail1 = alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFail]].htlc - alice2relayer.expectNoMessage(250 millis) + alice2relayer.expectNoMessage(100 millis) alice ! WatchTxConfirmedTriggered(BlockHeight(202), 0, claimHtlcTimeoutTxs(1)) val forwardedFail2 = alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFail]].htlc - alice2relayer.expectNoMessage(250 millis) + alice2relayer.expectNoMessage(100 millis) alice ! 
WatchTxConfirmedTriggered(BlockHeight(203), 1, claimHtlcTimeoutTxs(2)) val forwardedFail3 = alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFail]].htlc assert(Set(forwardedFail1, forwardedFail2, forwardedFail3) == htlcs) - alice2relayer.expectNoMessage(250 millis) + alice2relayer.expectNoMessage(100 millis) awaitCond(alice.stateName == CLOSED) } @@ -1185,26 +1484,26 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with alice2relayer.expectNoMessage(100 millis) alice ! WatchTxConfirmedTriggered(BlockHeight(201), 0, claimHtlcTimeoutTxs(0)) val forwardedFail1 = alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFail]].htlc - alice2relayer.expectNoMessage(250 millis) + alice2relayer.expectNoMessage(100 millis) alice ! WatchTxConfirmedTriggered(BlockHeight(202), 0, claimHtlcTimeoutTxs(1)) val forwardedFail2 = alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFail]].htlc - alice2relayer.expectNoMessage(250 millis) + alice2relayer.expectNoMessage(100 millis) alice ! WatchTxConfirmedTriggered(BlockHeight(203), 1, claimHtlcTimeoutTxs(2)) val forwardedFail3 = alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFail]].htlc assert(Set(forwardedFail1, forwardedFail2, forwardedFail3) == htlcs) - alice2relayer.expectNoMessage(250 millis) + alice2relayer.expectNoMessage(100 millis) awaitCond(alice.stateName == CLOSED) } test("recv WatchTxConfirmedTriggered (next remote commit) followed by CMD_FULFILL_HTLC") { f => import f._ // An HTLC Bob -> Alice is cross-signed that will be fulfilled later. 
- val (r1, htlc1) = addHtlc(110000000 msat, CltvExpiryDelta(64), bob, alice, bob2alice, alice2bob) + val (r1, htlc1) = addHtlc(110_000_000 msat, CltvExpiryDelta(64), bob, alice, bob2alice, alice2bob) crossSign(bob, alice, bob2alice, alice2bob) - alice2relayer.expectMsgType[RelayForward] + assert(alice2relayer.expectMsgType[RelayForward].add == htlc1) // An HTLC Alice -> Bob is only signed by Alice: Bob has two spendable commit tx. - val (_, htlc2) = addHtlc(95000000 msat, CltvExpiryDelta(32), alice, bob, alice2bob, bob2alice) + val (_, htlc2) = addHtlc(95_000_000 msat, CltvExpiryDelta(32), alice, bob, alice2bob, bob2alice) alice ! CMD_SIGN() alice2bob.expectMsgType[CommitSig] alice2bob.forward(bob) @@ -1286,10 +1585,10 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with val oldStateData = alice.stateData assert(oldStateData.asInstanceOf[DATA_NORMAL].commitments.params.channelFeatures == channelFeatures) // This HTLC will be fulfilled. - val (ra1, htlca1) = addHtlc(25000000 msat, alice, bob, alice2bob, bob2alice) + val (ra1, htlca1) = addHtlc(25_000_000 msat, alice, bob, alice2bob, bob2alice) // These 2 HTLCs should timeout on-chain, but since alice lost data, she won't be able to claim them. 
- val (ra2, _) = addHtlc(15000000 msat, alice, bob, alice2bob, bob2alice) - val (_, cmd) = makeCmdAdd(15000000 msat, bob.underlyingActor.nodeParams.nodeId, alice.underlyingActor.nodeParams.currentBlockHeight, ra2) + val (ra2, _) = addHtlc(15_000_000 msat, alice, bob, alice2bob, bob2alice) + val (_, cmd) = makeCmdAdd(15_000_000 msat, bob.nodeParams.nodeId, alice.nodeParams.currentBlockHeight, ra2) addHtlc(cmd, alice, bob, alice2bob, bob2alice) crossSign(alice, bob, alice2bob, bob2alice) fulfillHtlc(htlca1.id, ra1, bob, alice, bob2alice, alice2bob) @@ -1335,7 +1634,7 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with // bob's commit tx sends directly to alice's wallet assert(alice2blockchain.expectMsgType[WatchTxConfirmed].txId == bobCommitTx.txid) awaitCond(alice.stateData.asInstanceOf[DATA_CLOSING].futureRemoteCommitPublished.isDefined) - alice2blockchain.expectNoMessage(250 millis) // alice ignores the htlc-timeout + alice2blockchain.expectNoMessage(100 millis) // alice ignores the htlc-timeout // actual test starts here alice ! WatchTxConfirmedTriggered(BlockHeight(0), 0, bobCommitTx) @@ -1362,7 +1661,7 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with assert(alice2blockchain.expectMsgType[WatchTxConfirmed].txId == bobCommitTx.txid) awaitCond(alice.stateData.asInstanceOf[DATA_CLOSING].futureRemoteCommitPublished.isDefined) assert(alice2blockchain.expectMsgType[WatchTxConfirmed].txId == claimMainTx.txid) - alice2blockchain.expectNoMessage(250 millis) // alice ignores the htlc-timeout + alice2blockchain.expectNoMessage(100 millis) // alice ignores the htlc-timeout // actual test starts here alice ! 
WatchTxConfirmedTriggered(BlockHeight(0), 0, bobCommitTx) @@ -1399,9 +1698,9 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with // Bob's second commit tx contains 1 incoming htlc and 1 outgoing htlc val (localCommit2, htlcAlice1, htlcBob1) = { - val (ra, htlcAlice) = addHtlc(35000000 msat, alice, bob, alice2bob, bob2alice) + val (ra, htlcAlice) = addHtlc(35_000_000 msat, alice, bob, alice2bob, bob2alice) crossSign(alice, bob, alice2bob, bob2alice) - val (rb, htlcBob) = addHtlc(20000000 msat, bob, alice, bob2alice, alice2bob) + val (rb, htlcBob) = addHtlc(20_000_000 msat, bob, alice, bob2alice, alice2bob) crossSign(bob, alice, bob2alice, alice2bob) val localCommit = bob.stateData.asInstanceOf[DATA_NORMAL].commitments.latest.localCommit (localCommit, (htlcAlice, ra), (htlcBob, rb)) @@ -1415,9 +1714,9 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with // Bob's third commit tx contains 2 incoming htlcs and 2 outgoing htlcs val (localCommit3, htlcAlice2, htlcBob2) = { - val (ra, htlcAlice) = addHtlc(25000000 msat, alice, bob, alice2bob, bob2alice) + val (ra, htlcAlice) = addHtlc(25_000_000 msat, alice, bob, alice2bob, bob2alice) crossSign(alice, bob, alice2bob, bob2alice) - val (rb, htlcBob) = addHtlc(18000000 msat, bob, alice, bob2alice, alice2bob) + val (rb, htlcBob) = addHtlc(18_000_000 msat, bob, alice, bob2alice, alice2bob) crossSign(bob, alice, bob2alice, alice2bob) val localCommit = bob.stateData.asInstanceOf[DATA_NORMAL].commitments.latest.localCommit (localCommit, (htlcAlice, ra), (htlcBob, rb)) @@ -1499,7 +1798,7 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with // alice watches outputs that can be spent by both parties val watchedOutpoints = Seq(alice2blockchain.expectMsgType[WatchOutputSpent], alice2blockchain.expectMsgType[WatchOutputSpent], alice2blockchain.expectMsgType[WatchOutputSpent]).map(_.outputIndex).toSet assert(watchedOutpoints == 
(rvk.mainPenaltyTx.get :: rvk.htlcPenaltyTxs).map(_.input.outPoint.index).toSet) - alice2blockchain.expectNoMessage(1 second) + alice2blockchain.expectNoMessage(100 millis) (bobRevokedTx, rvk) } @@ -1562,7 +1861,7 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with assert(alice2blockchain.expectMsgType[WatchOutputSpent].outputIndex == mainPenalty.txIn.head.outPoint.index) val htlcOutpoints = (1 to htlcCount).map(_ => alice2blockchain.expectMsgType[WatchOutputSpent].outputIndex).toSet assert(htlcOutpoints == htlcPenaltyTxs.flatMap(_.txIn.map(_.outPoint.index)).toSet) - alice2blockchain.expectNoMessage(1 second) + alice2blockchain.expectNoMessage(100 millis) alice.stateData.asInstanceOf[DATA_CLOSING].revokedCommitPublished.last } @@ -1652,7 +1951,7 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with assert(alice2blockchain.expectMsgType[WatchTxConfirmed].txId == rvk.claimMainOutputTx.get.tx.txid) } (1 to 5).foreach(_ => alice2blockchain.expectMsgType[WatchOutputSpent]) // main output penalty and 4 htlc penalties - alice2blockchain.expectNoMessage(1 second) + alice2blockchain.expectNoMessage(100 millis) // bob manages to claim 2 htlc outputs before alice can penalize him: 1 htlc-success and 1 htlc-timeout. val (fulfilledHtlc, preimage) = revokedCloseFixture.htlcsAlice.head @@ -1679,7 +1978,7 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with val watchSpent1 = alice2blockchain.expectMsgType[WatchOutputSpent] assert(watchSpent1.txId == bobHtlcSuccessTx1.tx.txid) assert(watchSpent1.outputIndex == claimHtlcSuccessPenalty1.input.outPoint.index) - alice2blockchain.expectNoMessage(1 second) + alice2blockchain.expectNoMessage(100 millis) alice ! 
WatchOutputSpentTriggered(bobHtlcTimeoutTx.amountIn, bobHtlcTimeoutTx.tx) awaitCond(alice.stateData.asInstanceOf[DATA_CLOSING].revokedCommitPublished.head.claimHtlcDelayedPenaltyTxs.size == 2) @@ -1690,7 +1989,7 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with val watchSpent2 = alice2blockchain.expectMsgType[WatchOutputSpent] assert(watchSpent2.txId == bobHtlcTimeoutTx.tx.txid) assert(watchSpent2.outputIndex == claimHtlcTimeoutPenalty.input.outPoint.index) - alice2blockchain.expectNoMessage(1 second) + alice2blockchain.expectNoMessage(100 millis) // bob RBFs his htlc-success with a different transaction val bobHtlcSuccessTx2 = bobHtlcSuccessTx1.tx.copy(txIn = TxIn(OutPoint(randomTxId(), 0), Nil, 0) +: bobHtlcSuccessTx1.tx.txIn) @@ -1705,7 +2004,7 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with val watchSpent3 = alice2blockchain.expectMsgType[WatchOutputSpent] assert(watchSpent3.txId == bobHtlcSuccessTx2.txid) assert(watchSpent3.outputIndex == claimHtlcSuccessPenalty2.input.outPoint.index) - alice2blockchain.expectNoMessage(1 second) + alice2blockchain.expectNoMessage(100 millis) // transactions confirm: alice can move to the closed state val remainingHtlcPenaltyTxs = rvk.htlcPenaltyTxs.filterNot(htlcPenalty => bobOutpoints.contains(htlcPenalty.input.outPoint)) @@ -1759,7 +2058,7 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with assert(alice2blockchain.expectMsgType[WatchTxConfirmed].txId == rvk.commitTx.txid) assert(alice2blockchain.expectMsgType[WatchTxConfirmed].txId == rvk.claimMainOutputTx.get.tx.txid) (1 to 5).foreach(_ => alice2blockchain.expectMsgType[WatchOutputSpent]) // main output penalty and 4 htlc penalties - alice2blockchain.expectNoMessage(1 second) + alice2blockchain.expectNoMessage(100 millis) // bob claims multiple htlc outputs in a single transaction (this is possible with anchor outputs because signatures // use sighash_single | 
sighash_anyonecanpay) @@ -1813,7 +2112,7 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with alice2blockchain.expectMsgType[WatchOutputSpent] ).map(w => OutPoint(w.txId, w.outputIndex)).toSet assert(watchedOutpoints == spentOutpoints) - alice2blockchain.expectNoMessage(1 second) + alice2blockchain.expectNoMessage(100 millis) } private def testRevokedTxConfirmed(f: FixtureParam, channelFeatures: ChannelFeatures): Unit = { @@ -1827,8 +2126,8 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with // bob's second commit tx contains 2 incoming htlcs val (bobRevokedTx, htlcs1) = { - val (_, htlc1) = addHtlc(35000000 msat, alice, bob, alice2bob, bob2alice) - val (_, htlc2) = addHtlc(20000000 msat, alice, bob, alice2bob, bob2alice) + val (_, htlc1) = addHtlc(35_000_000 msat, alice, bob, alice2bob, bob2alice) + val (_, htlc2) = addHtlc(20_000_000 msat, alice, bob, alice2bob, bob2alice) crossSign(alice, bob, alice2bob, bob2alice) val bobCommitTx = bob.stateData.asInstanceOf[DATA_NORMAL].commitments.latest.localCommit.commitTxAndRemoteSig.commitTx.tx assert(bobCommitTx.txOut.size == initOutputCount + 2) @@ -1837,8 +2136,8 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with // bob's third commit tx contains 1 of the previous htlcs and 2 new htlcs val htlcs2 = { - val (_, htlc3) = addHtlc(25000000 msat, alice, bob, alice2bob, bob2alice) - val (_, htlc4) = addHtlc(18000000 msat, alice, bob, alice2bob, bob2alice) + val (_, htlc3) = addHtlc(25_000_000 msat, alice, bob, alice2bob, bob2alice) + val (_, htlc4) = addHtlc(18_000_000 msat, alice, bob, alice2bob, bob2alice) failHtlc(htlcs1.head.id, bob, alice, bob2alice, alice2bob) crossSign(bob, alice, bob2alice, alice2bob) assert(bob.stateData.asInstanceOf[DATA_NORMAL].commitments.latest.localCommit.commitTxAndRemoteSig.commitTx.tx.txOut.size == initOutputCount + 3) @@ -1847,7 +2146,7 @@ class ClosingStateSpec extends TestKitBaseClass with 
FixtureAnyFunSuiteLike with // alice's first htlc has been failed assert(alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.Fail]].htlc == htlcs1.head) - alice2relayer.expectNoMessage(1 second) + alice2relayer.expectNoMessage(100 millis) // bob publishes one of his revoked txs which quickly confirms alice ! WatchFundingSpentTriggered(bobRevokedTx) @@ -1859,9 +2158,10 @@ class ClosingStateSpec extends TestKitBaseClass with FixtureAnyFunSuiteLike with alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFail]], alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFail]], alice2relayer.expectMsgType[RES_ADD_SETTLED[Origin, HtlcResult.OnChainFail]] - ).map(_.htlc).toSet - assert(htlcFails == Set(htlcs1(1), htlcs2(0), htlcs2(1))) - alice2relayer.expectNoMessage(1 second) + ).map(f => (f.htlc, f.origin)).toSet + val expectedFails = Set(htlcs1(1), htlcs2(0), htlcs2(1)).map(htlc => (htlc, alice.stateData.asInstanceOf[DATA_CLOSING].commitments.originChannels(htlc.id))) + assert(htlcFails == expectedFails) + alice2relayer.expectNoMessage(100 millis) } test("recv WatchTxConfirmedTriggered (one revoked tx, pending htlcs)") { f => diff --git a/eclair-core/src/test/scala/fr/acinq/eclair/db/PendingCommandsDbSpec.scala b/eclair-core/src/test/scala/fr/acinq/eclair/db/PendingCommandsDbSpec.scala index 7f957da50b..f5f395be19 100644 --- a/eclair-core/src/test/scala/fr/acinq/eclair/db/PendingCommandsDbSpec.scala +++ b/eclair-core/src/test/scala/fr/acinq/eclair/db/PendingCommandsDbSpec.scala @@ -60,9 +60,11 @@ class PendingCommandsDbSpec extends AnyFunSuite { assert(db.listSettlementCommands(channelId1).toSet == Set.empty) db.addSettlementCommand(channelId1, msg0) db.addSettlementCommand(channelId1, msg0) // duplicate + db.addSettlementCommand(channelId1, CMD_FAIL_HTLC(msg0.id, FailureReason.EncryptedDownstreamFailure(randomBytes32()))) // conflicting command db.addSettlementCommand(channelId1, msg1) db.addSettlementCommand(channelId1, msg2) 
db.addSettlementCommand(channelId1, msg3) + db.addSettlementCommand(channelId1, CMD_FULFILL_HTLC(msg3.id, randomBytes32())) // conflicting command db.addSettlementCommand(channelId1, msg4) db.addSettlementCommand(channelId2, msg0) // same messages but for different channel db.addSettlementCommand(channelId2, msg1) @@ -71,6 +73,8 @@ class PendingCommandsDbSpec extends AnyFunSuite { assert(db.listSettlementCommands().toSet == Set((channelId1, msg0), (channelId1, msg1), (channelId1, msg2), (channelId1, msg3), (channelId1, msg4), (channelId2, msg0), (channelId2, msg1))) db.removeSettlementCommand(channelId1, msg1.id) assert(db.listSettlementCommands().toSet == Set((channelId1, msg0), (channelId1, msg2), (channelId1, msg3), (channelId1, msg4), (channelId2, msg0), (channelId2, msg1))) + db.removeSettlementCommand(channelId1, msg0.id) + assert(db.listSettlementCommands().toSet == Set((channelId1, msg2), (channelId1, msg3), (channelId1, msg4), (channelId2, msg0), (channelId2, msg1))) } } diff --git a/eclair-core/src/test/scala/fr/acinq/eclair/payment/relay/ChannelRelayerSpec.scala b/eclair-core/src/test/scala/fr/acinq/eclair/payment/relay/ChannelRelayerSpec.scala index eca6dea134..2475ac6cc7 100644 --- a/eclair-core/src/test/scala/fr/acinq/eclair/payment/relay/ChannelRelayerSpec.scala +++ b/eclair-core/src/test/scala/fr/acinq/eclair/payment/relay/ChannelRelayerSpec.scala @@ -731,6 +731,38 @@ class ChannelRelayerSpec extends ScalaTestWithActorTestKit(ConfigFactory.load("a } } + test("settlement success followed by failure") { f => + import f._ + + val channelId1 = channelIds(realScid1) + val u = createLocalUpdate(channelId1) + channelRelayer ! WrappedLocalChannelUpdate(u) + + val payload = ChannelRelay.Standard(realScid1, outgoingAmount, outgoingExpiry) + val r = createValidIncomingPacket(payload, endorsementIn = 3) + channelRelayer ! 
Relay(r, TestConstants.Alice.nodeParams.nodeId) + val fwd1 = expectFwdAdd(register, channelIds(realScid1), outgoingAmount, outgoingExpiry, 3) + fwd1.message.replyTo ! RES_SUCCESS(fwd1.message, channelId1) + + // The downstream HTLC is fulfilled. + val downstream = UpdateAddHtlc(randomBytes32(), 7, outgoingAmount, paymentHash, outgoingExpiry, emptyOnionPacket, None, 0.4375, None) + fwd1.message.origin.replyTo ! RES_ADD_SETTLED(fwd1.message.origin, downstream, HtlcResult.RemoteFulfill(UpdateFulfillHtlc(downstream.channelId, downstream.id, paymentPreimage))) + val fulfill = inside(register.expectMessageType[Register.Forward[CMD_FULFILL_HTLC]]) { fwd => + assert(fwd.channelId == r.add.channelId) + assert(fwd.message.id == r.add.id) + assert(fwd.message.r == paymentPreimage) + fwd.message + } + + // The command is stored in the pending settlements DB. + eventually(assert(nodeParams.db.pendingCommands.listSettlementCommands(r.add.channelId) == Seq(fulfill.copy(commit = false)))) + + // The downstream HTLC is now failed (e.g. because a revoked commitment confirmed that doesn't include it): this conflicting command is ignored. + fwd1.message.origin.replyTo ! 
RES_ADD_SETTLED(fwd1.message.origin, downstream, HtlcResult.OnChainFail(HtlcOverriddenByLocalCommit(downstream.channelId, downstream))) + register.expectNoMessage(100 millis) + assert(nodeParams.db.pendingCommands.listSettlementCommands(r.add.channelId) == Seq(fulfill.copy(commit = false))) + } + test("get outgoing channels") { f => import PaymentPacketSpec._ import f._ @@ -797,7 +829,7 @@ object ChannelRelayerSpec { val channelId1: ByteVector32 = randomBytes32() val channelId2: ByteVector32 = randomBytes32() - val channelIds = Map( + val channelIds: Map[ShortChannelId, ByteVector32] = Map( realScid1 -> channelId1, realScid2 -> channelId2, localAlias1 -> channelId1, diff --git a/eclair-core/src/test/scala/fr/acinq/eclair/payment/relay/NodeRelayerSpec.scala b/eclair-core/src/test/scala/fr/acinq/eclair/payment/relay/NodeRelayerSpec.scala index 8f87b90c55..520e044e57 100644 --- a/eclair-core/src/test/scala/fr/acinq/eclair/payment/relay/NodeRelayerSpec.scala +++ b/eclair-core/src/test/scala/fr/acinq/eclair/payment/relay/NodeRelayerSpec.scala @@ -29,7 +29,7 @@ import fr.acinq.bitcoin.scalacompat.{Block, ByteVector32, Crypto} import fr.acinq.eclair.EncodedNodeId.ShortChannelIdDir import fr.acinq.eclair.FeatureSupport.{Mandatory, Optional} import fr.acinq.eclair.Features.{AsyncPaymentPrototype, BasicMultiPartPayment, PaymentSecret, VariableLengthOnion} -import fr.acinq.eclair.channel.{CMD_FAIL_HTLC, CMD_FULFILL_HTLC, Register, Upstream} +import fr.acinq.eclair.channel._ import fr.acinq.eclair.crypto.Sphinx import fr.acinq.eclair.io.{Peer, PeerReadyManager, Switchboard} import fr.acinq.eclair.payment.Bolt11Invoice.ExtraHop @@ -628,6 +628,49 @@ class NodeRelayerSpec extends ScalaTestWithActorTestKit(ConfigFactory.load("appl register.expectNoMessage(100 millis) } + test("ignore downstream failures after fulfill") { f => + import f._ + + // Receive an upstream multi-part payment. 
+ val (nodeRelayer, parent) = f.createNodeRelay(incomingMultiPart.head) + incomingMultiPart.foreach(p => nodeRelayer ! NodeRelay.Relay(p, randomKey().publicKey)) + + val getPeerInfo = register.expectMessageType[Register.ForwardNodeId[Peer.GetPeerInfo]](100 millis) + getPeerInfo.message.replyTo.foreach(_ ! Peer.PeerNotFound(getPeerInfo.nodeId)) + + mockPayFSM.expectMessageType[SendPaymentConfig] + val outgoingPayment = mockPayFSM.expectMessageType[SendMultiPartPayment] + validateOutgoingPayment(outgoingPayment) + // those are adapters for pay-fsm messages + val nodeRelayerAdapters = outgoingPayment.replyTo + + // A first downstream HTLC is fulfilled: we immediately forward the fulfill upstream. + nodeRelayerAdapters ! PreimageReceived(paymentHash, paymentPreimage) + val fulfills = incomingMultiPart.map { p => + val fwd = register.expectMessageType[Register.Forward[CMD_FULFILL_HTLC]] + assert(fwd.channelId == p.add.channelId) + assert(fwd.message == CMD_FULFILL_HTLC(p.add.id, paymentPreimage, commit = true)) + fwd + } + // We store the commands in our DB in case we restart before relaying them upstream. + val upstreamChannels = fulfills.map(_.channelId).toSet + upstreamChannels.foreach(channelId => { + eventually(assert(nodeParams.db.pendingCommands.listSettlementCommands(channelId).toSet == fulfills.filter(_.channelId == channelId).map(_.message.copy(commit = false)).toSet)) + }) + + // The remaining downstream HTLCs are failed (e.g. because a revoked commitment confirmed that doesn't include them). + // The corresponding commands conflict with the previous fulfill and are ignored. + val downstreamHtlc = UpdateAddHtlc(randomBytes32(), 7, outgoingAmount, paymentHash, outgoingExpiry, TestConstants.emptyOnionPacket, None, 0.4375, None) + val failure = LocalFailure(outgoingAmount, Nil, HtlcOverriddenByLocalCommit(randomBytes32(), downstreamHtlc)) + nodeRelayerAdapters ! 
PaymentFailed(relayId, incomingMultiPart.head.add.paymentHash, Seq(failure)) + eventListener.expectNoMessage(100 millis) // the payment didn't succeed, but didn't fail either, so we just ignore it + parent.expectMessageType[NodeRelayer.RelayComplete] + register.expectNoMessage(100 millis) + upstreamChannels.foreach(channelId => { + assert(nodeParams.db.pendingCommands.listSettlementCommands(channelId).toSet == fulfills.filter(_.channelId == channelId).map(_.message.copy(commit = false)).toSet) + }) + } + test("relay incoming single-part payment") { f => import f._