diff --git a/lightning-invoice/src/utils.rs b/lightning-invoice/src/utils.rs index 25a7cf77d5e..f83a6d412b0 100644 --- a/lightning-invoice/src/utils.rs +++ b/lightning-invoice/src/utils.rs @@ -1373,22 +1373,7 @@ mod test { assert_eq!(other_events.borrow().len(), 1); check_payment_claimable(&other_events.borrow()[0], payment_hash, payment_secret, payment_amt, payment_preimage_opt, invoice.recover_payee_pub_key()); do_claim_payment_along_route(&nodes[0], &[&vec!(&nodes[fwd_idx])[..]], false, payment_preimage); - let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 2); - match events[0] { - Event::PaymentSent { payment_preimage: ref ev_preimage, payment_hash: ref ev_hash, ref fee_paid_msat, .. } => { - assert_eq!(payment_preimage, *ev_preimage); - assert_eq!(payment_hash, *ev_hash); - assert_eq!(fee_paid_msat, &Some(0)); - }, - _ => panic!("Unexpected event") - } - match events[1] { - Event::PaymentPathSuccessful { payment_hash: hash, .. } => { - assert_eq!(hash, Some(payment_hash)); - }, - _ => panic!("Unexpected event") - } + expect_payment_sent(&nodes[0], payment_preimage, None, true, true); } #[test] diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index 2cc71a2ecc7..106a7299fd1 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -806,7 +806,7 @@ impl diff --git a/lightning/src/ln/chanmon_update_fail_tests.rs b/lightning/src/ln/chanmon_update_fail_tests.rs --- a/lightning/src/ln/chanmon_update_fail_tests.rs +++ b/lightning/src/ln/chanmon_update_fail_tests.rs assert_eq!(*node_id, nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); + expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false); nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed); check_added_monitors!(nodes[0], 1); @@ -1437,7 +1438,7 @@ fn claim_while_disconnected_monitor_update_fail() { nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa); check_added_monitors!(nodes[0], 1); - expect_payment_sent!(nodes[0], payment_preimage_1); + expect_payment_path_successful!(nodes[0]); claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2); } @@ -2191,7 +2192,7 @@ fn test_fail_htlc_on_broadcast_after_claim() { expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]); - expect_payment_sent_without_paths!(nodes[0], payment_preimage); + expect_payment_sent(&nodes[0], payment_preimage, None, false, false); commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, true, true); expect_payment_path_successful!(nodes[0]); } @@ -2444,7 +2445,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { assert!(updates.update_fee.is_none()); assert_eq!(updates.update_fulfill_htlcs.len(), 1); nodes[1].node.handle_update_fulfill_htlc(&nodes[0].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); - expect_payment_sent_without_paths!(nodes[1], payment_preimage_0); + expect_payment_sent(&nodes[1], payment_preimage_0, None, false, false); assert_eq!(updates.update_add_htlcs.len(), 1); nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); updates.commitment_signed @@ -2461,7 +2462,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) { expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 100000);
check_added_monitors!(nodes[1], 1); - commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false); + commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false, false); let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), 2); @@ -2562,7 +2563,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id())); assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]); - expect_payment_sent_without_paths!(nodes[0], payment_preimage); + expect_payment_sent(&nodes[0], payment_preimage, None, false, false); if htlc_status == HTLCStatusAtDupClaim::Cleared { commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false); expect_payment_path_successful!(nodes[0]); @@ -2589,7 +2590,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id())); assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]); - expect_payment_sent_without_paths!(nodes[0], payment_preimage); + expect_payment_sent(&nodes[0], payment_preimage, None, false, false); } if htlc_status != HTLCStatusAtDupClaim::Cleared { commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false); @@ -2786,7 +2787,7 @@ fn double_temp_error() { assert_eq!(node_id, nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_1); check_added_monitors!(nodes[0], 0); - expect_payment_sent_without_paths!(nodes[0], payment_preimage_1); + expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false); nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_b1); check_added_monitors!(nodes[0], 1); nodes[0].node.process_pending_htlc_forwards(); @@ -3011,3 +3012,67 @@ fn test_inbound_reload_without_init_mon() { do_test_inbound_reload_without_init_mon(false, true); do_test_inbound_reload_without_init_mon(false, false); } + +#[test] +fn test_blocked_chan_preimage_release() { + // Test that even if a channel's `ChannelMonitorUpdate` flow is blocked waiting on an event to + // be handled, HTLC preimage `ChannelMonitorUpdate`s will still go out. + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + create_announced_chan_between_nodes(&nodes, 0, 1).2; + create_announced_chan_between_nodes(&nodes, 1, 2).2; + + send_payment(&nodes[0], &[&nodes[1], &nodes[2]], 5_000_000); + + // Tee up two payments in opposite directions across nodes[1], one that it sent to generate a + // PaymentSent event and one that it forwards. + let (payment_preimage_1, payment_hash_1, _) = route_payment(&nodes[1], &[&nodes[2]], 1_000_000); + let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[2], &[&nodes[1], &nodes[0]], 1_000_000); + + // Claim the first payment to get a `PaymentSent` event (but don't handle it yet).
+ nodes[2].node.claim_funds(payment_preimage_1); + check_added_monitors(&nodes[2], 1); + expect_payment_claimed!(nodes[2], payment_hash_1, 1_000_000); + + let cs_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[2], nodes[1].node.get_our_node_id()); + nodes[1].node.handle_update_fulfill_htlc(&nodes[2].node.get_our_node_id(), &cs_htlc_fulfill_updates.update_fulfill_htlcs[0]); + commitment_signed_dance!(nodes[1], nodes[2], cs_htlc_fulfill_updates.commitment_signed, false); + check_added_monitors(&nodes[1], 0); + + // Now claim the second payment on nodes[0], which will ultimately result in nodes[1] trying to + // claim an HTLC on its channel with nodes[2], but that channel is blocked on the above + // `PaymentSent` event. + nodes[0].node.claim_funds(payment_preimage_2); + check_added_monitors(&nodes[0], 1); + expect_payment_claimed!(nodes[0], payment_hash_2, 1_000_000); + + let as_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[0], nodes[1].node.get_our_node_id()); + nodes[1].node.handle_update_fulfill_htlc(&nodes[0].node.get_our_node_id(), &as_htlc_fulfill_updates.update_fulfill_htlcs[0]); + check_added_monitors(&nodes[1], 1); // We generate only a preimage monitor update + assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); + + // Finish the CS dance between nodes[0] and nodes[1]. + commitment_signed_dance!(nodes[1], nodes[0], as_htlc_fulfill_updates.commitment_signed, false); + check_added_monitors(&nodes[1], 0); + + let events = nodes[1].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 3); + if let Event::PaymentSent { .. } = events[0] {} else { panic!(); } + if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); } + if let Event::PaymentForwarded { .. } = events[1] {} else { panic!(); } + + // The event processing should release the last RAA update. + check_added_monitors(&nodes[1], 1); + + // When we fetch the next update the message getter will generate the next update for nodes[2], + // generating a further monitor update. + let bs_htlc_fulfill_updates = get_htlc_update_msgs!(nodes[1], nodes[2].node.get_our_node_id()); + check_added_monitors(&nodes[1], 1); + + nodes[2].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_htlc_fulfill_updates.update_fulfill_htlcs[0]); + commitment_signed_dance!(nodes[2], nodes[1], bs_htlc_fulfill_updates.commitment_signed, false); + expect_payment_sent(&nodes[2], payment_preimage_2, None, true, true); +} diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index e559ac33550..44ecd8f4cae 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -3223,7 +3223,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> { /// generating an appropriate error *after* the channel state has been updated based on the /// revoke_and_ack message. pub fn revoke_and_ack<F: Deref, L: Deref>(&mut self, msg: &msgs::RevokeAndACK, - fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L + fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L, hold_mon_update: bool, ) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<ChannelMonitorUpdate>), ChannelError> where F::Target: FeeEstimator, L::Target: Logger, { @@ -3404,6 +3404,22 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> { } } + let release_monitor = self.context.blocked_monitor_updates.is_empty() && !hold_mon_update; + let release_state_str = + if hold_mon_update { "Holding" } else if release_monitor { "Releasing" } else { "Blocked" }; + macro_rules!
return_with_htlcs_to_fail { + ($htlcs_to_fail: expr) => { + if !release_monitor { + self.context.blocked_monitor_updates.push(PendingChannelMonitorUpdate { + update: monitor_update, + }); + return Ok(($htlcs_to_fail, None)); + } else { + return Ok(($htlcs_to_fail, Some(monitor_update))); + } + } + } + if (self.context.channel_state & ChannelState::MonitorUpdateInProgress as u32) == ChannelState::MonitorUpdateInProgress as u32 { // We can't actually generate a new commitment transaction (incl by freeing holding // cells) while we can't update the monitor, so we just return what we have. @@ -3422,7 +3438,7 @@ impl Channel { self.context.monitor_pending_failures.append(&mut revoked_htlcs); self.context.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs); log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.context.channel_id())); - return Ok((Vec::new(), self.push_ret_blockable_mon_update(monitor_update))); + return_with_htlcs_to_fail!(Vec::new()); } match self.free_holding_cell_htlcs(fee_estimator, logger) { @@ -3432,8 +3448,11 @@ impl Channel { self.context.latest_monitor_update_id = monitor_update.update_id; monitor_update.updates.append(&mut additional_update.updates); + log_debug!(logger, "Received a valid revoke_and_ack for channel {} with holding cell HTLCs freed. {} monitor update.", + log_bytes!(self.context.channel_id()), release_state_str); + self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs); - Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update))) + return_with_htlcs_to_fail!(htlcs_to_fail); }, (None, htlcs_to_fail) => { if require_commitment { @@ -3444,14 +3463,19 @@ impl Channel { self.context.latest_monitor_update_id = monitor_update.update_id; monitor_update.updates.append(&mut additional_update.updates); - log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed.", - log_bytes!(self.context.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len()); + log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed. {} monitor update.", + log_bytes!(self.context.channel_id()), + update_fail_htlcs.len() + update_fail_malformed_htlcs.len(), + release_state_str); + self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs); - Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update))) + return_with_htlcs_to_fail!(htlcs_to_fail); } else { - log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.context.channel_id())); + log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary. {} monitor update.", + log_bytes!(self.context.channel_id()), release_state_str); + self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs); - Ok((htlcs_to_fail, self.push_ret_blockable_mon_update(monitor_update))) + return_with_htlcs_to_fail!(htlcs_to_fail); } } } diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index b22c3716ccb..ab4d2fa382b 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1097,7 +1097,11 @@ where /// could be in the middle of being processed without the direct mutex held. 
/// /// See `ChannelManager` struct-level documentation for lock order requirements. + #[cfg(not(any(test, feature = "_test_utils")))] pending_events: Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>, + #[cfg(any(test, feature = "_test_utils"))] + pub(crate) pending_events: Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>, + /// A simple atomic flag to ensure only one task at a time can be processing events asynchronously. pending_events_processor: AtomicBool, @@ -4936,12 +4940,18 @@ where self.pending_outbound_payments.finalize_claims(sources, &self.pending_events); } - fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_id: [u8; 32]) { + fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_outpoint: OutPoint) { match source { HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => { debug_assert!(self.background_events_processed_since_startup.load(Ordering::Acquire), "We don't support claim_htlc claims during startup - monitors may not be available yet"); - self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage, session_priv, path, from_onchain, &self.pending_events, &self.logger); + let ev_completion_action = EventCompletionAction::ReleaseRAAChannelMonitorUpdate { + channel_funding_outpoint: next_channel_outpoint, + counterparty_node_id: path.hops[0].pubkey, + }; + self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage, + session_priv, path, from_onchain, ev_completion_action, &self.pending_events, + &self.logger); }, HTLCSource::PreviousHopData(hop_data) => { let prev_outpoint = hop_data.outpoint; @@ -4957,7 +4967,7 @@ where fee_earned_msat, claim_from_onchain_tx: from_onchain, prev_channel_id: Some(prev_outpoint.to_channel_id()), - next_channel_id: Some(next_channel_id), + next_channel_id: Some(next_channel_outpoint.to_channel_id()), outbound_amount_forwarded_msat: forwarded_htlc_value_msat, }, downstream_counterparty_and_funding_outpoint: None, @@ -5712,6 +5722,7 @@ where } fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> { + let funding_txo; let (htlc_source, forwarded_htlc_value) = { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) @@ -5723,12 +5734,14 @@ where let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { - try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), chan) + let res = try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), chan); + funding_txo = chan.get().context.get_funding_txo().expect("We won't accept a fulfill until funded"); + res }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node!
No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id)) } }; - self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, msg.channel_id); + self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, funding_txo); Ok(()) } @@ -5925,10 +5938,19 @@ where let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { hash_map::Entry::Occupied(mut chan) => { - let funding_txo = chan.get().context.get_funding_txo(); - let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger), chan); + let funding_txo_opt = chan.get().context.get_funding_txo(); + let mon_update_blocked = if let Some(funding_txo) = funding_txo_opt { + self.pending_events.lock().unwrap().iter().any(|(_, action)| { + action == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate { + channel_funding_outpoint: funding_txo, + counterparty_node_id: *counterparty_node_id, + }) + }) + } else { false }; + let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self, + chan.get_mut().revoke_and_ack(&msg, &self.fee_estimator, &self.logger, mon_update_blocked), chan); let res = if let Some(monitor_update) = monitor_update_opt { - handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, + handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, peer_state_lock, peer_state, per_peer_state, chan).map(|_| ()) } else { Ok(()) }; (htlcs_to_fail, res) @@ -6103,7 +6125,7 @@ where MonitorEvent::HTLCEvent(htlc_update) => { if let Some(preimage) = htlc_update.payment_preimage { log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0)); - self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id()); + self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint); } else { log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0)); let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() }; @@ -8794,7 +8816,13 @@ where // generating a `PaymentPathSuccessful` event but regenerating // it and the `PaymentSent` on every restart until the // `ChannelMonitor` is removed. - pending_outbounds.claim_htlc(payment_id, preimage, session_priv, path, false, &pending_events, &args.logger); + let compl_action = + EventCompletionAction::ReleaseRAAChannelMonitorUpdate { + channel_funding_outpoint: monitor.get_funding_txo().0, + counterparty_node_id: path.hops[0].pubkey, + }; + pending_outbounds.claim_htlc(payment_id, preimage, session_priv, + path, false, compl_action, &pending_events, &args.logger); pending_events_read = pending_events.into_inner().unwrap(); } }, @@ -8815,7 +8843,7 @@ where // downstream chan is closed (because we don't have a // channel_id -> peer map entry). 
counterparty_opt.is_none(), - monitor.get_funding_txo().0.to_channel_id())) + monitor.get_funding_txo().0)) } else { None } } else { // If it was an outbound payment, we've handled it above - if a preimage @@ -9084,12 +9112,12 @@ where channel_manager.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver); } - for (source, preimage, downstream_value, downstream_closed, downstream_chan_id) in pending_claims_to_replay { + for (source, preimage, downstream_value, downstream_closed, downstream_funding) in pending_claims_to_replay { // We use `downstream_closed` in place of `from_onchain` here just as a guess - we // don't remember in the `ChannelMonitor` where we got a preimage from, but if the // channel is closed we just assume that it probably came from an on-chain claim. channel_manager.claim_funds_internal(source, preimage, Some(downstream_value), - downstream_closed, downstream_chan_id); + downstream_closed, downstream_funding); } //TODO: Broadcast channel update for closed channels, but only after we've made a @@ -9267,6 +9295,7 @@ mod tests { let bs_first_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_first_updates.update_fulfill_htlcs[0]); + expect_payment_sent(&nodes[0], payment_preimage, None, false, false); nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_updates.commitment_signed); check_added_monitors!(nodes[0], 1); let (as_first_raa, as_first_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); @@ -9294,16 +9323,8 @@ mod tests { // Note that successful MPP payments will generate a single PaymentSent event upon the first // path's success and a PaymentPathSuccessful event for each path's success. let events = nodes[0].node.get_and_clear_pending_events(); - assert_eq!(events.len(), 3); + assert_eq!(events.len(), 2); match events[0] { - Event::PaymentSent { payment_id: ref id, payment_preimage: ref preimage, payment_hash: ref hash, .. 
} => { - assert_eq!(Some(payment_id), *id); - assert_eq!(payment_preimage, *preimage); - assert_eq!(our_payment_hash, *hash); - }, - _ => panic!("Unexpected event"), - } - match events[1] { Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path } => { assert_eq!(payment_id, *actual_payment_id); assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap()); @@ -9311,7 +9332,7 @@ mod tests { }, _ => panic!("Unexpected event"), } - match events[2] { + match events[1] { Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path } => { assert_eq!(payment_id, *actual_payment_id); assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap()); diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 84bc1a1b3f0..38e3a8b270c 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -17,7 +17,7 @@ use crate::chain::transaction::OutPoint; use crate::events::{ClosureReason, Event, HTLCDestination, MessageSendEvent, MessageSendEventsProvider, PathFailure, PaymentPurpose, PaymentFailureReason}; use crate::events::bump_transaction::{BumpTransactionEventHandler, Wallet, WalletSource}; use crate::ln::{PaymentPreimage, PaymentHash, PaymentSecret}; -use crate::ln::channelmanager::{AChannelManager, ChainParameters, ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, PaymentId, MIN_CLTV_EXPIRY_DELTA}; +use crate::ln::channelmanager::{self, AChannelManager, ChainParameters, ChannelManager, ChannelManagerReadArgs, RAACommitmentOrder, PaymentSendFailure, RecipientOnionFields, PaymentId, MIN_CLTV_EXPIRY_DELTA}; use crate::routing::gossip::{P2PGossipSync, NetworkGraph, NetworkUpdate}; use crate::routing::router::{self, PaymentParameters, Route}; use crate::ln::features::InitFeatures; @@ -1670,8 +1670,8 @@ macro_rules! commitment_signed_dance { bs_revoke_and_ack } }; - ($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, false /* no extra message */) => { - assert!($crate::ln::functional_test_utils::commitment_signed_dance_through_cp_raa(&$node_a, &$node_b, $fail_backwards).is_none()); + ($node_a: expr, $node_b: expr, (), $fail_backwards: expr, true /* skip last step */, false /* no extra message */, $incl_claim: expr) => { + assert!($crate::ln::functional_test_utils::commitment_signed_dance_through_cp_raa(&$node_a, &$node_b, $fail_backwards, $incl_claim).is_none()); }; ($node_a: expr, $node_b: expr, $commitment_signed: expr, $fail_backwards: expr) => { $crate::ln::functional_test_utils::do_commitment_signed_dance(&$node_a, &$node_b, &$commitment_signed, $fail_backwards, false); @@ -1683,10 +1683,10 @@ macro_rules! commitment_signed_dance { /// `revoke_and_ack` response to it. /// /// Returns any additional message `node_b` generated in addition to the `revoke_and_ack` response. 
-pub fn commitment_signed_dance_through_cp_raa(node_a: &Node<'_, '_, '_>, node_b: &Node<'_, '_, '_>, fail_backwards: bool) -> Option<MessageSendEvent> { +pub fn commitment_signed_dance_through_cp_raa(node_a: &Node<'_, '_, '_>, node_b: &Node<'_, '_, '_>, fail_backwards: bool, includes_claim: bool) -> Option<MessageSendEvent> { let (extra_msg_option, bs_revoke_and_ack) = do_main_commitment_signed_dance(node_a, node_b, fail_backwards); node_a.node.handle_revoke_and_ack(&node_b.node.get_our_node_id(), &bs_revoke_and_ack); - check_added_monitors(node_a, 1); + check_added_monitors(node_a, if includes_claim { 0 } else { 1 }); extra_msg_option } @@ -1733,7 +1733,23 @@ pub fn do_commitment_signed_dance(node_a: &Node<'_, '_, '_>, node_b: &Node<'_, ' node_a.node.handle_commitment_signed(&node_b.node.get_our_node_id(), commitment_signed); check_added_monitors!(node_a, 1); - commitment_signed_dance!(node_a, node_b, (), fail_backwards, true, false); + // If this commitment signed dance was due to a claim, don't check for an RAA monitor update. + let got_claim = node_a.node.pending_events.lock().unwrap().iter().any(|(ev, action)| { + let matching_action = if let Some(channelmanager::EventCompletionAction::ReleaseRAAChannelMonitorUpdate + { channel_funding_outpoint, counterparty_node_id }) = action + { + if channel_funding_outpoint.to_channel_id() == commitment_signed.channel_id { + assert_eq!(*counterparty_node_id, node_b.node.get_our_node_id()); + true + } else { false } + } else { false }; + if matching_action { + if let Event::PaymentSent { .. } = ev {} else { panic!(); } + } + matching_action + }); + if fail_backwards { assert!(!got_claim); } + commitment_signed_dance!(node_a, node_b, (), fail_backwards, true, false, got_claim); if skip_last_step { return; } @@ -1878,7 +1894,7 @@ macro_rules! expect_payment_claimed { pub fn expect_payment_sent<CM: AChannelManager, H: NodeHolder<CM=CM>>(node: &H, expected_payment_preimage: PaymentPreimage, expected_fee_msat_opt: Option<Option<u64>>, - expect_per_path_claims: bool, + expect_per_path_claims: bool, expect_post_ev_mon_update: bool, ) { let events = node.node().get_and_clear_pending_events(); let expected_payment_hash = PaymentHash( @@ -1888,6 +1904,9 @@ pub fn expect_payment_sent<CM: AChannelManager, H: NodeHolder<CM=CM>>(node: &H, } else { assert_eq!(events.len(), 1); } + if expect_post_ev_mon_update { + check_added_monitors(node, 1); + } let expected_payment_id = match events[0] { Event::PaymentSent { ref payment_id, ref payment_preimage, ref payment_hash, ref fee_paid_msat } => { assert_eq!(expected_payment_preimage, *payment_preimage); @@ -1914,17 +1933,6 @@ pub fn expect_payment_sent<CM: AChannelManager, H: NodeHolder<CM=CM>>(node: &H, } } -#[cfg(test)] -#[macro_export] -macro_rules! expect_payment_sent_without_paths { - ($node: expr, $expected_payment_preimage: expr) => { - expect_payment_sent!($node, $expected_payment_preimage, None::<u64>, false); - }; - ($node: expr, $expected_payment_preimage: expr, $expected_fee_msat_opt: expr) => { - expect_payment_sent!($node, $expected_payment_preimage, $expected_fee_msat_opt, false); - } -} - #[macro_export] macro_rules! expect_payment_sent { ($node: expr, $expected_payment_preimage: expr) => { @@ -1935,7 +1943,7 @@ macro_rules!
expect_payment_sent { }; ($node: expr, $expected_payment_preimage: expr, $expected_fee_msat_opt: expr, $expect_paths: expr) => { $crate::ln::functional_test_utils::expect_payment_sent(&$node, $expected_payment_preimage, - $expected_fee_msat_opt.map(|o| Some(o)), $expect_paths); + $expected_fee_msat_opt.map(|o| Some(o)), $expect_paths, true); } } diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 8de2e9f631a..95f4869077c 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -2121,7 +2121,7 @@ fn channel_reserve_in_flight_removes() { nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_removes.commitment_signed); check_added_monitors!(nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - expect_payment_sent_without_paths!(nodes[0], payment_preimage_1); + expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false); nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &send_1.msgs[0]); nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &send_1.commitment_msg); @@ -2150,7 +2150,7 @@ fn channel_reserve_in_flight_removes() { nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_cs.commitment_signed); check_added_monitors!(nodes[0], 1); let as_raa = get_event_msg!(nodes[0], MessageSendEvent::SendRevokeAndACK, nodes[1].node.get_our_node_id()); - expect_payment_sent_without_paths!(nodes[0], payment_preimage_2); + expect_payment_sent(&nodes[0], payment_preimage_2, None, false, false); nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa); check_added_monitors!(nodes[1], 1); @@ -3580,7 +3580,7 @@ fn test_dup_events_on_peer_disconnect() { check_added_monitors!(nodes[1], 1); let claim_msgs = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &claim_msgs.update_fulfill_htlcs[0]); - expect_payment_sent_without_paths!(nodes[0], payment_preimage); + expect_payment_sent(&nodes[0], payment_preimage, None, false, false); nodes[0].node.peer_disconnected(&nodes[1].node.get_our_node_id()); nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); @@ -3693,6 +3693,7 @@ fn test_simple_peer_disconnect() { _ => panic!("Unexpected event"), } } + check_added_monitors(&nodes[0], 1); claim_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_preimage_4); fail_payment(&nodes[0], &vec!(&nodes[1], &nodes[2]), payment_hash_6); @@ -4911,7 +4912,7 @@ fn test_duplicate_payment_hash_one_failure_one_success() { nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); commitment_signed_dance!(nodes[0], nodes[1], &updates.commitment_signed, false); - expect_payment_sent(&nodes[0], our_payment_preimage, None, true); + expect_payment_sent(&nodes[0], our_payment_preimage, None, true, true); } #[test] @@ -5454,7 +5455,7 @@ fn do_htlc_claim_local_commitment_only(use_dust: bool) { let bs_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]); - expect_payment_sent_without_paths!(nodes[0], payment_preimage); + expect_payment_sent(&nodes[0], payment_preimage, None, false, false); nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_updates.commitment_signed); 
check_added_monitors!(nodes[0], 1); @@ -9379,7 +9380,7 @@ fn test_inconsistent_mpp_params() { pass_along_path(&nodes[0], &[&nodes[2], &nodes[3]], 15_000_000, our_payment_hash, Some(our_payment_secret), events.pop().unwrap(), true, None); do_claim_payment_along_route(&nodes[0], &[&[&nodes[1], &nodes[3]], &[&nodes[2], &nodes[3]]], false, our_payment_preimage); - expect_payment_sent(&nodes[0], our_payment_preimage, Some(None), true); + expect_payment_sent(&nodes[0], our_payment_preimage, Some(None), true, true); } #[test] @@ -10105,3 +10106,89 @@ fn test_remove_expired_inbound_unfunded_channels() { check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed); } + +fn do_test_multi_post_event_actions(do_reload: bool) { + // Tests handling multiple post-Event actions at once. + // There is specific code in ChannelManager to handle channels where multiple post-Event + // `ChannelMonitorUpdates` are pending at once. This test exercises that code. + // + // Specifically, we test calling `get_and_clear_pending_events` while there are two + // PaymentSents from different channels and one channel has two pending `ChannelMonitorUpdate`s + // - one from an RAA and one from an inbound commitment_signed. + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let (persister, chain_monitor, nodes_0_deserialized); + let mut nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let chan_id = create_announced_chan_between_nodes(&nodes, 0, 1).2; + let chan_id_2 = create_announced_chan_between_nodes(&nodes, 0, 2).2; + + send_payment(&nodes[0], &[&nodes[1]], 1_000_000); + send_payment(&nodes[0], &[&nodes[2]], 1_000_000); + + let (our_payment_preimage, our_payment_hash, _) = route_payment(&nodes[0], &[&nodes[1]], 1_000_000); + let (payment_preimage_2, payment_hash_2, _) = route_payment(&nodes[0], &[&nodes[2]], 1_000_000); + + nodes[1].node.claim_funds(our_payment_preimage); + check_added_monitors!(nodes[1], 1); + expect_payment_claimed!(nodes[1], our_payment_hash, 1_000_000); + + nodes[2].node.claim_funds(payment_preimage_2); + check_added_monitors!(nodes[2], 1); + expect_payment_claimed!(nodes[2], payment_hash_2, 1_000_000); + + for dest in &[1, 2] { + let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[*dest], nodes[0].node.get_our_node_id()); + nodes[0].node.handle_update_fulfill_htlc(&nodes[*dest].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[*dest], htlc_fulfill_updates.commitment_signed, false); + check_added_monitors(&nodes[0], 0); + } + + let (route, payment_hash_3, _, payment_secret_3) = + get_route_and_payment_hash!(nodes[1], nodes[0], 100_000); + let payment_id = PaymentId(payment_hash_3.0); + nodes[1].node.send_payment_with_route(&route, payment_hash_3, + RecipientOnionFields::secret_only(payment_secret_3), payment_id).unwrap(); + check_added_monitors(&nodes[1], 1); + + let send_event = SendEvent::from_node(&nodes[1]); + nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &send_event.msgs[0]); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &send_event.commitment_msg); + assert!(nodes[0].node.get_and_clear_pending_msg_events().is_empty()); + + if do_reload { + let nodes_0_serialized = nodes[0].node.encode(); + let chan_0_monitor_serialized = get_monitor!(nodes[0], chan_id).encode(); + let chan_1_monitor_serialized = get_monitor!(nodes[0], 
chan_id_2).encode(); + reload_node!(nodes[0], test_default_channel_config(), &nodes_0_serialized, &[&chan_0_monitor_serialized, &chan_1_monitor_serialized], persister, chain_monitor, nodes_0_deserialized); + + nodes[1].node.peer_disconnected(&nodes[0].node.get_our_node_id()); + nodes[2].node.peer_disconnected(&nodes[0].node.get_our_node_id()); + + reconnect_nodes(&nodes[0], &nodes[1], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + reconnect_nodes(&nodes[0], &nodes[2], (false, false), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0), (false, false)); + } + + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 4); + if let Event::PaymentSent { payment_preimage, .. } = events[0] { + assert!(payment_preimage == our_payment_preimage || payment_preimage == payment_preimage_2); + } else { panic!(); } + if let Event::PaymentSent { payment_preimage, .. } = events[1] { + assert!(payment_preimage == our_payment_preimage || payment_preimage == payment_preimage_2); + } else { panic!(); } + if let Event::PaymentPathSuccessful { .. } = events[2] {} else { panic!(); } + if let Event::PaymentPathSuccessful { .. } = events[3] {} else { panic!(); } + + // After the events are processed, the ChannelMonitorUpdates will be released and, upon their + // completion, we'll respond to nodes[1] with an RAA + CS. + get_revoke_commit_msgs(&nodes[0], &nodes[1].node.get_our_node_id()); + check_added_monitors(&nodes[0], 3); +} + +#[test] +fn test_multi_post_event_actions() { + do_test_multi_post_event_actions(true); + do_test_multi_post_event_actions(false); +} diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index a916dbfc9e2..3cd2365c852 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -345,7 +345,7 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) { if prev_commitment_tx { // To build a previous commitment transaction, deliver one round of commitment messages. nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &b_htlc_msgs.update_fulfill_htlcs[0]); - expect_payment_sent_without_paths!(nodes[0], payment_preimage); + expect_payment_sent(&nodes[0], payment_preimage, None, false, false); nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &b_htlc_msgs.commitment_signed); check_added_monitors!(nodes[0], 1); let (as_raa, as_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); @@ -454,7 +454,7 @@ fn do_test_claim_value_force_close(prev_commitment_tx: bool) { if prev_commitment_tx { expect_payment_path_successful!(nodes[0]); } else { - expect_payment_sent!(nodes[0], payment_preimage); + expect_payment_sent(&nodes[0], payment_preimage, None, true, false); } assert_eq!(sorted_vec(vec![sent_htlc_balance.clone(), sent_htlc_timeout_balance.clone()]), sorted_vec(nodes[0].chain_monitor.chain_monitor.get_monitor(funding_outpoint).unwrap().get_claimable_balances())); @@ -681,7 +681,7 @@ fn test_balances_on_local_commitment_htlcs() { // Now confirm nodes[1]'s HTLC claim, giving nodes[0] the preimage. Note that the "maybe // claimable" balance remains until we see ANTI_REORG_DELAY blocks. 
mine_transaction(&nodes[0], &bs_htlc_claim_txn[0]); - expect_payment_sent!(nodes[0], payment_preimage_2); + expect_payment_sent(&nodes[0], payment_preimage_2, None, true, false); assert_eq!(sorted_vec(vec![Balance::ClaimableAwaitingConfirmations { claimable_amount_satoshis: 1_000_000 - 10_000 - 20_000 - chan_feerate * (channel::commitment_tx_base_weight(&channel_type_features) + 2 * channel::COMMITMENT_TX_WEIGHT_PER_HTLC) / 1000, @@ -1538,7 +1538,7 @@ fn test_revoked_counterparty_aggregated_claims() { // Confirm A's HTLC-Success transaction which presumably raced B's claim, causing B to create a // new claim. mine_transaction(&nodes[1], &as_revoked_txn[1]); - expect_payment_sent!(nodes[1], claimed_payment_preimage); + expect_payment_sent(&nodes[1], claimed_payment_preimage, None, true, false); let mut claim_txn_2: Vec<_> = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().clone(); claim_txn_2.sort_unstable_by_key(|tx| if tx.input.iter().any(|inp| inp.previous_output.txid == as_revoked_txn[0].txid()) { 0 } else { 1 }); // Once B sees the HTLC-Success transaction it splits its claim transaction into two, though in diff --git a/lightning/src/ln/outbound_payment.rs b/lightning/src/ln/outbound_payment.rs index 30e718dccd6..47c409e689a 100644 --- a/lightning/src/ln/outbound_payment.rs +++ b/lightning/src/ln/outbound_payment.rs @@ -1118,7 +1118,7 @@ impl OutboundPayments { pub(super) fn claim_htlc<L: Deref>( &self, payment_id: PaymentId, payment_preimage: PaymentPreimage, session_priv: SecretKey, - path: Path, from_onchain: bool, + path: Path, from_onchain: bool, ev_completion_action: EventCompletionAction, pending_events: &Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>, logger: &L, ) where L::Target: Logger { @@ -1135,7 +1135,7 @@ impl OutboundPayments { payment_preimage, payment_hash, fee_paid_msat, - }, None)); + }, Some(ev_completion_action.clone()))); payment.get_mut().mark_fulfilled(); } @@ -1152,7 +1152,7 @@ impl OutboundPayments { payment_id, payment_hash, path, - }, None)); + }, Some(ev_completion_action.clone()))); } } } else { diff --git a/lightning/src/ln/payment_tests.rs b/lightning/src/ln/payment_tests.rs index 2ae606106c9..32141d95129 100644 --- a/lightning/src/ln/payment_tests.rs +++ b/lightning/src/ln/payment_tests.rs @@ -577,7 +577,7 @@ fn do_retry_with_no_persist(confirm_before_reload: bool) { mine_transaction(&nodes[0], &as_commitment_tx); } mine_transaction(&nodes[0], &bs_htlc_claim_txn[0]); - expect_payment_sent!(nodes[0], payment_preimage_1); + expect_payment_sent(&nodes[0], payment_preimage_1, None, true, false); connect_blocks(&nodes[0], TEST_FINAL_CLTV*4 + 20); let (first_htlc_timeout_tx, second_htlc_timeout_tx) = { let mut txn = nodes[0].tx_broadcaster.unique_txn_broadcast(); @@ -936,7 +936,7 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co if payment_timeout { expect_payment_failed!(nodes[0], payment_hash, false); } else { - expect_payment_sent!(nodes[0], payment_preimage); + expect_payment_sent(&nodes[0], payment_preimage, None, true, false); } // If we persist the ChannelManager after we get the PaymentSent event, we shouldn't get it @@ -953,7 +953,7 @@ fn do_test_dup_htlc_onchain_fails_on_reload(persist_manager_post_event: bool, co } else if payment_timeout { expect_payment_failed!(nodes[0], payment_hash, false); } else { - expect_payment_sent!(nodes[0], payment_preimage); + expect_payment_sent(&nodes[0], payment_preimage, None, true, false); } // Note that if we re-connect the block which exposed nodes[0] to the payment preimage (but @@ -1005,7 +1005,7 @@ fn
test_fulfill_restart_failure() { let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]); - expect_payment_sent_without_paths!(nodes[0], payment_preimage); + expect_payment_sent(&nodes[0], payment_preimage, None, false, false); // Now reload nodes[1]... reload_node!(nodes[1], &chan_manager_serialized, &[&chan_0_monitor_serialized], persister, new_chain_monitor, nodes_1_deserialized); @@ -1706,6 +1706,7 @@ fn do_test_intercepted_payment(test: InterceptTest) { }, _ => panic!("Unexpected event") } + check_added_monitors(&nodes[0], 1); } else if test == InterceptTest::Timeout { let mut block = create_dummy_block(nodes[0].best_block_hash(), 42, Vec::new()); connect_block(&nodes[0], &block); @@ -1860,7 +1861,7 @@ fn do_accept_underpaying_htlcs_config(num_mpp_parts: usize) { payment_preimage); // The sender doesn't know that the penultimate hop took an extra fee. expect_payment_sent(&nodes[0], payment_preimage, - Some(Some(total_fee_msat - skimmed_fee_msat * num_mpp_parts as u64)), true); + Some(Some(total_fee_msat - skimmed_fee_msat * num_mpp_parts as u64)), true, true); } #[derive(PartialEq)] @@ -2284,6 +2285,7 @@ fn auto_retry_partial_failure() { assert_eq!(bs_claim_update.update_fulfill_htlcs.len(), 1); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_claim_update.update_fulfill_htlcs[0]); + expect_payment_sent(&nodes[0], payment_preimage, None, false, false); nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_claim_update.commitment_signed); check_added_monitors!(nodes[0], 1); let (as_third_raa, as_third_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id()); @@ -2298,6 +2300,7 @@ fn auto_retry_partial_failure() { nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_third_raa); check_added_monitors!(nodes[0], 1); + expect_payment_path_successful!(nodes[0]); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_claim_update.update_fulfill_htlcs[0]); nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_claim_update.update_fulfill_htlcs[1]); @@ -2314,7 +2317,10 @@ fn auto_retry_partial_failure() { nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_second_raa); check_added_monitors!(nodes[0], 1); - expect_payment_sent!(nodes[0], payment_preimage); + let events = nodes[0].node.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); + if let Event::PaymentPathSuccessful { .. } = events[0] {} else { panic!(); } + if let Event::PaymentPathSuccessful { .. } = events[1] {} else { panic!(); } } #[test] @@ -3103,7 +3109,7 @@ fn test_threaded_payment_retries() { } } -fn do_no_missing_sent_on_midpoint_reload(persist_manager_with_payment: bool) { +fn do_no_missing_sent_on_reload(persist_manager_with_payment: bool, at_midpoint: bool) { // Test that if we reload in the middle of an HTLC claim commitment signed dance we'll still // receive the PaymentSent event even if the ChannelManager had no idea about the payment when // it was last persisted. 
@@ -3132,10 +3138,20 @@ fn do_no_missing_sent_on_midpoint_reload(persist_manager_with_payment: bool) { check_added_monitors!(nodes[1], 1); expect_payment_claimed!(nodes[1], our_payment_hash, 1_000_000); - let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); - nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); - nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed); - check_added_monitors!(nodes[0], 1); + if at_midpoint { + let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]); + nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed); + check_added_monitors!(nodes[0], 1); + } else { + let htlc_fulfill_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); + nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &htlc_fulfill_updates.update_fulfill_htlcs[0]); + commitment_signed_dance!(nodes[0], nodes[1], htlc_fulfill_updates.commitment_signed, false); + // Ignore the PaymentSent event which is now pending on nodes[0] - if we were to handle it we'd + // be expected to ignore the eventual conflicting PaymentFailed, but by not looking at it we + // expect to get the PaymentSent again later. + check_added_monitors(&nodes[0], 0); + } // The ChannelMonitor should always be the latest version, as we're required to persist it // during the commitment signed handling. @@ -3181,8 +3197,14 @@ fn do_no_missing_sent_on_midpoint_reload(persist_manager_with_payment: bool) { #[test] fn no_missing_sent_on_midpoint_reload() { - do_no_missing_sent_on_midpoint_reload(false); - do_no_missing_sent_on_midpoint_reload(true); + do_no_missing_sent_on_reload(false, true); + do_no_missing_sent_on_reload(true, true); +} + +#[test] +fn no_missing_sent_on_reload() { + do_no_missing_sent_on_reload(false, false); + do_no_missing_sent_on_reload(true, false); } fn do_claim_from_closed_chan(fail_payment: bool) { diff --git a/lightning/src/ln/reorg_tests.rs b/lightning/src/ln/reorg_tests.rs index be756840adc..7b357ec037b 100644 --- a/lightning/src/ln/reorg_tests.rs +++ b/lightning/src/ln/reorg_tests.rs @@ -230,7 +230,7 @@ fn test_counterparty_revoked_reorg() { // Connect the HTLC claim transaction for HTLC 3 mine_transaction(&nodes[1], &unrevoked_local_txn[2]); - expect_payment_sent!(nodes[1], payment_preimage_3); + expect_payment_sent(&nodes[1], payment_preimage_3, None, true, false); assert!(nodes[1].node.get_and_clear_pending_msg_events().is_empty()); // Connect blocks to confirm the unrevoked commitment transaction diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index 3aa48c1b45d..cc448c9939f 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -345,7 +345,7 @@ fn htlc_fail_async_shutdown() { nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &updates.commitment_signed); check_added_monitors!(nodes[1], 1); nodes[1].node.handle_shutdown(&nodes[0].node.get_our_node_id(), &node_0_shutdown); - commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false); + commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false, false); let updates_2 = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()); assert!(updates_2.update_add_htlcs.is_empty()); diff --git 
a/pending_changelog/matt-event-compl-forward-compat.txt b/pending_changelog/matt-event-compl-forward-compat.txt new file mode 100644 index 00000000000..a77d5a2ba41 --- /dev/null +++ b/pending_changelog/matt-event-compl-forward-compat.txt @@ -0,0 +1,4 @@ +## Backwards Compatibility + * In cases where `Event` processing is delayed beyond the time required for a + full channel update round-trip with a counterparty, LDK versions prior to + 0.0.115 may fail to read a `ChannelManager` written by 0.0.115.
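
The mechanism this diff introduces — holding a channel's RAA `ChannelMonitorUpdate` while a `PaymentSent`-style event generated by the same claim is still unprocessed, then releasing it via an `EventCompletionAction` once the user has handled the event — can be summarized with a small standalone sketch. This is illustration only, not LDK's API: `BlockedChannel` and `MonitorUpdate` below are hypothetical stand-ins that model just the queue-then-release behavior of `blocked_monitor_updates` and `EventCompletionAction::ReleaseRAAChannelMonitorUpdate`.

use std::collections::VecDeque;

struct MonitorUpdate { update_id: u64 }

#[derive(Default)]
struct BlockedChannel {
    // Events (e.g. a PaymentSent) the user has not yet finished processing.
    pending_events: usize,
    // RAA monitor updates held back until those events complete.
    blocked_updates: VecDeque<MonitorUpdate>,
}

impl BlockedChannel {
    // On revoke_and_ack: release the update immediately, or queue it while an
    // event from this channel's claim is pending ("Blocked" in the new logs).
    fn handle_raa_update(&mut self, update: MonitorUpdate) -> Option<MonitorUpdate> {
        if self.pending_events == 0 && self.blocked_updates.is_empty() {
            Some(update) // "Releasing"
        } else {
            self.blocked_updates.push_back(update); // "Blocked"/"Holding"
            None
        }
    }

    // Completion action: once the user handles the event, drain the queue.
    fn on_event_completed(&mut self) -> Vec<MonitorUpdate> {
        self.pending_events -= 1;
        if self.pending_events == 0 {
            self.blocked_updates.drain(..).collect()
        } else {
            Vec::new()
        }
    }
}

fn main() {
    let mut chan = BlockedChannel::default();
    chan.pending_events = 1; // an unhandled PaymentSent blocks the channel
    assert!(chan.handle_raa_update(MonitorUpdate { update_id: 1 }).is_none());
    let released = chan.on_event_completed();
    assert_eq!(released.len(), 1); // update goes out only after event handling
    assert_eq!(released[0].update_id, 1);
}

This ordering is what the backwards-compatibility note above is about: if event processing stalls past a full commitment round-trip, the held updates (and their completion actions) end up serialized in the `ChannelManager`, which pre-0.0.115 readers may not understand.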