Delay RAA-after-next processing until PaymentSent is handled
In 0ad1f4c we fixed a nasty bug
where a failure to persist a `ChannelManager` faster than a
`ChannelMonitor` could result in the loss of a `PaymentSent` event,
eventually resulting in a `PaymentFailed` instead!

As noted in that commit, some risk remains, though it's been
substantially reduced: if we receive an `update_fulfill_htlc`
message for an outbound payment, and persist the initial removal
`ChannelMonitorUpdate`, then respond with our own
`commitment_signed` + `revoke_and_ack`, followed by receiving our
peer's final `revoke_and_ack`, and then persist the
`ChannelMonitorUpdate` generated from that, all prior to completing
a `ChannelManager` persistence, we'll still forget the HTLC and
eventually trigger a `PaymentFailed` rather than the correct
`PaymentSent`.
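
In test-harness terms the risky interleaving looks roughly like the
following (a sketch only: node/channel setup is elided, and the
persistence timing is described in comments rather than driven by a
real persister):

    let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
    // (1) The update_fulfill_htlc arrives; assume the initial removal
    //     ChannelMonitorUpdate is persisted here, making the preimage safe.
    nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
    nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
    check_added_monitors!(nodes[0], 1);
    // (2) We respond with our own commitment_signed + revoke_and_ack...
    let (as_raa, as_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
    nodes[1].node.handle_revoke_and_ack(&nodes[0].node.get_our_node_id(), &as_raa);
    nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &as_cs);
    check_added_monitors!(nodes[1], 2);
    let bs_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());
    // (3) ...and handle the peer's final revoke_and_ack. If the
    //     ChannelMonitorUpdate generated here is persisted before any
    //     ChannelManager persistence completes, a crash now forgets the
    //     HTLC and later yields PaymentFailed instead of PaymentSent.
    nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
    check_added_monitors!(nodes[0], 1);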

Here we fully fix the issue by delaying the final
`ChannelMonitorUpdate` persistence until the `PaymentSent` event
has been processed and document the fact that a spurious
`PaymentFailed` event can still be generated for a sent payment.
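
The shape of that gating, as a deliberately simplified standalone
model (the `flown` flag and the release-on-event-completion step
mirror the diff below, but these types are toy stand-ins rather than
LDK's):

    struct PendingChannelMonitorUpdate { update_id: u64, flown: bool }

    struct ToyChannel { pending_monitor_updates: Vec<PendingChannelMonitorUpdate> }

    impl ToyChannel {
        // Mirrors the new `hold_mon_update` parameter on revoke_and_ack: the
        // update is always queued, but only handed to the persister ("flown")
        // if nothing earlier is held and no PaymentSent event is pending.
        fn queue_raa_monitor_update(&mut self, update_id: u64, hold_mon_update: bool) -> bool {
            let fly = self.pending_monitor_updates.iter().all(|u| u.flown) && !hold_mon_update;
            self.pending_monitor_updates.push(PendingChannelMonitorUpdate { update_id, flown: fly });
            fly // caller persists the update now only when this is true
        }

        // Run once the PaymentSent event completes (the diff's
        // ReleaseRAAChannelMonitorUpdate action): held updates may now fly.
        fn release_held_updates(&mut self) -> Vec<u64> {
            self.pending_monitor_updates.iter_mut()
                .filter(|u| !u.flown)
                .map(|u| { u.flown = true; u.update_id })
                .collect()
        }
    }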

The original fix in 0ad1f4c is
still incredibly useful here, allowing us to avoid blocking the
first `ChannelMonitorUpdate` until the event processing completes,
as this would add event-processing delay to our general
commitment-update latency. Instead, we ultimately race the user
handling the `PaymentSent` event with how long it takes our
`revoke_and_ack` + `commitment_signed` to make it to our
counterparty and receive the response `revoke_and_ack`. This should
give the user plenty of time to handle the event before we need to
make progress.

Sadly, because we change our `ChannelMonitorUpdate` semantics, this
change requires a number of test changes, avoiding checking for a
post-RAA `ChannelMonitorUpdate` until after we process a
`PaymentSent` event. Note that this does not apply to payments we
learned the preimage for on-chain - ensuring `PaymentSent` events
from such resolutions will be addressed in a future PR. Thus, tests
which resolve payments on-chain switch to a direct call to the
`expect_payment_sent` function with the claim-expected flag unset.
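
For reference when reading those test changes, here is one of the new
call sites with its arguments annotated (meanings inferred from the
call sites in this diff; the function's definition is not shown here):

    expect_payment_sent(
        &nodes[0],
        payment_preimage, // preimage the PaymentSent event must carry
        None,             // no specific fee amount to assert on
        true,             // also expect per-path PaymentPathSuccessful event(s)
        true,             // expect the now-delayed post-event ChannelMonitorUpdate
    );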
TheBlueMatt committed Mar 28, 2023
1 parent 57bebe7 commit c7e8f27
Showing 14 changed files with 229 additions and 101 deletions.
17 changes: 1 addition & 16 deletions lightning-invoice/src/utils.rs
@@ -1091,22 +1091,7 @@ mod test {
let payment_preimage_opt = if user_generated_pmt_hash { None } else { Some(payment_preimage) };
expect_payment_claimable!(&nodes[fwd_idx], payment_hash, payment_secret, payment_amt, payment_preimage_opt, route.paths[0].last().unwrap().pubkey);
do_claim_payment_along_route(&nodes[0], &[&vec!(&nodes[fwd_idx])[..]], false, payment_preimage);
let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 2);
match events[0] {
Event::PaymentSent { payment_preimage: ref ev_preimage, payment_hash: ref ev_hash, ref fee_paid_msat, .. } => {
assert_eq!(payment_preimage, *ev_preimage);
assert_eq!(payment_hash, *ev_hash);
assert_eq!(fee_paid_msat, &Some(0));
},
_ => panic!("Unexpected event")
}
match events[1] {
Event::PaymentPathSuccessful { payment_hash: hash, .. } => {
assert_eq!(hash, Some(payment_hash));
},
_ => panic!("Unexpected event")
}
expect_payment_sent(&nodes[0], payment_preimage, None, true, true);
}

#[test]
8 changes: 4 additions & 4 deletions lightning/src/chain/chainmonitor.rs
@@ -788,7 +788,7 @@ mod tests {
use bitcoin::{BlockHeader, TxMerkleNode};
use bitcoin::hashes::Hash;
use crate::{check_added_monitors, check_closed_broadcast, check_closed_event};
use crate::{expect_payment_sent, expect_payment_claimed, expect_payment_sent_without_paths, expect_payment_path_successful, get_event_msg};
use crate::{expect_payment_claimed, expect_payment_path_successful, get_event_msg};
use crate::{get_htlc_update_msgs, get_local_commitment_txn, get_revoke_commit_msgs, get_route_and_payment_hash, unwrap_send_err};
use crate::chain::{ChannelMonitorUpdateStatus, Confirm, Watch};
use crate::chain::channelmonitor::LATENCY_GRACE_PERIOD_BLOCKS;
@@ -871,7 +871,7 @@ mod tests {

let updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
expect_payment_sent_without_paths!(nodes[0], payment_preimage_1);
expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
check_added_monitors!(nodes[0], 1);
let (as_first_raa, as_first_update) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
@@ -884,7 +884,7 @@ mod tests {
let bs_first_raa = get_event_msg!(nodes[1], MessageSendEvent::SendRevokeAndACK, nodes[0].node.get_our_node_id());

nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_second_updates.update_fulfill_htlcs[0]);
expect_payment_sent_without_paths!(nodes[0], payment_preimage_2);
expect_payment_sent(&nodes[0], payment_preimage_2, None, false, false);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_second_updates.commitment_signed);
check_added_monitors!(nodes[0], 1);
nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_first_raa);
@@ -972,7 +972,7 @@ mod tests {
}
}

expect_payment_sent!(nodes[0], payment_preimage);
expect_payment_sent(&nodes[0], payment_preimage, None, true, false);
}

#[test]
15 changes: 8 additions & 7 deletions lightning/src/ln/chanmon_update_fail_tests.rs
@@ -1371,6 +1371,7 @@ fn claim_while_disconnected_monitor_update_fail() {
MessageSendEvent::UpdateHTLCs { ref node_id, ref updates } => {
assert_eq!(*node_id, nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &updates.commitment_signed);
check_added_monitors!(nodes[0], 1);

@@ -1408,7 +1409,7 @@ fn claim_while_disconnected_monitor_update_fail() {

nodes[0].node.handle_revoke_and_ack(&nodes[1].node.get_our_node_id(), &bs_raa);
check_added_monitors!(nodes[0], 1);
expect_payment_sent!(nodes[0], payment_preimage_1);
expect_payment_path_successful!(nodes[0]);

claim_payment(&nodes[0], &[&nodes[1]], payment_preimage_2);
}
@@ -2140,7 +2141,7 @@ fn test_fail_htlc_on_broadcast_after_claim() {
expect_pending_htlcs_forwardable_and_htlc_handling_failed!(nodes[1], vec![HTLCDestination::NextHopChannel { node_id: Some(nodes[2].node.get_our_node_id()), channel_id: chan_id_2 }]);

nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.update_fulfill_htlcs[0]);
expect_payment_sent_without_paths!(nodes[0], payment_preimage);
expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
commitment_signed_dance!(nodes[0], nodes[1], bs_updates.commitment_signed, true, true);
expect_payment_path_successful!(nodes[0]);
}
@@ -2376,7 +2377,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
assert!(updates.update_fee.is_none());
assert_eq!(updates.update_fulfill_htlcs.len(), 1);
nodes[1].node.handle_update_fulfill_htlc(&nodes[0].node.get_our_node_id(), &updates.update_fulfill_htlcs[0]);
expect_payment_sent_without_paths!(nodes[1], payment_preimage_0);
expect_payment_sent(&nodes[1], payment_preimage_0, None, false, false);
assert_eq!(updates.update_add_htlcs.len(), 1);
nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]);
updates.commitment_signed
@@ -2393,7 +2394,7 @@ fn do_channel_holding_cell_serialize(disconnect: bool, reload_a: bool) {
expect_payment_claimable!(nodes[1], payment_hash_1, payment_secret_1, 100000);
check_added_monitors!(nodes[1], 1);

commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false);
commitment_signed_dance!(nodes[1], nodes[0], (), false, true, false, false);

let events = nodes[1].node.get_and_clear_pending_events();
assert_eq!(events.len(), 2);
@@ -2493,7 +2494,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
expect_payment_sent_without_paths!(nodes[0], payment_preimage);
expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
if htlc_status == HTLCStatusAtDupClaim::Cleared {
commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
expect_payment_path_successful!(nodes[0]);
@@ -2520,7 +2521,7 @@ fn do_test_reconnect_dup_htlc_claims(htlc_status: HTLCStatusAtDupClaim, second_f
bs_updates = Some(get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id()));
assert_eq!(bs_updates.as_ref().unwrap().update_fulfill_htlcs.len(), 1);
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_updates.as_ref().unwrap().update_fulfill_htlcs[0]);
expect_payment_sent_without_paths!(nodes[0], payment_preimage);
expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
}
if htlc_status != HTLCStatusAtDupClaim::Cleared {
commitment_signed_dance!(nodes[0], nodes[1], &bs_updates.as_ref().unwrap().commitment_signed, false);
@@ -2717,7 +2718,7 @@ fn double_temp_error() {
assert_eq!(node_id, nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &update_fulfill_1);
check_added_monitors!(nodes[0], 0);
expect_payment_sent_without_paths!(nodes[0], payment_preimage_1);
expect_payment_sent(&nodes[0], payment_preimage_1, None, false, false);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &commitment_signed_b1);
check_added_monitors!(nodes[0], 1);
nodes[0].node.process_pending_htlc_forwards();
11 changes: 6 additions & 5 deletions lightning/src/ln/channel.rs
@@ -3421,7 +3421,8 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
/// waiting on this revoke_and_ack. The generation of this new commitment_signed may also fail,
/// generating an appropriate error *after* the channel state has been updated based on the
/// revoke_and_ack message.
pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L) -> Result<(Vec<(HTLCSource, PaymentHash)>, Option<&ChannelMonitorUpdate>), ChannelError>
pub fn revoke_and_ack<L: Deref>(&mut self, msg: &msgs::RevokeAndACK, logger: &L, hold_mon_update: bool)
-> Result<(Vec<(HTLCSource, PaymentHash)>, Option<&ChannelMonitorUpdate>), ChannelError>
where L::Target: Logger,
{
if (self.channel_state & (ChannelState::ChannelReady as u32)) != (ChannelState::ChannelReady as u32) {
@@ -3618,7 +3619,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
self.monitor_pending_failures.append(&mut revoked_htlcs);
self.monitor_pending_finalized_fulfills.append(&mut finalized_claimed_htlcs);
log_debug!(logger, "Received a valid revoke_and_ack for channel {} but awaiting a monitor update resolution to reply.", log_bytes!(self.channel_id()));
let fly_monitor = self.pending_monitor_updates.iter().all(|upd| upd.flown);
let fly_monitor = self.pending_monitor_updates.iter().all(|upd| upd.flown) && !hold_mon_update;
self.pending_monitor_updates.push(PendingChannelMonitorUpdate {
update: monitor_update, flown: fly_monitor,
});
@@ -3635,7 +3636,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
monitor_update.updates.append(&mut additional_update.updates);

self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
let fly_monitor = self.pending_monitor_updates.iter().all(|upd| upd.flown);
let fly_monitor = self.pending_monitor_updates.iter().all(|upd| upd.flown) && !hold_mon_update;
self.pending_monitor_updates.push(PendingChannelMonitorUpdate {
update: monitor_update, flown: fly_monitor,
});
@@ -3654,7 +3655,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
log_debug!(logger, "Received a valid revoke_and_ack for channel {}. Responding with a commitment update with {} HTLCs failed.",
log_bytes!(self.channel_id()), update_fail_htlcs.len() + update_fail_malformed_htlcs.len());
self.monitor_updating_paused(false, true, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
let fly_monitor = self.pending_monitor_updates.iter().all(|upd| upd.flown);
let fly_monitor = self.pending_monitor_updates.iter().all(|upd| upd.flown) && !hold_mon_update;
self.pending_monitor_updates.push(PendingChannelMonitorUpdate {
update: monitor_update, flown: fly_monitor,
});
@@ -3663,7 +3664,7 @@ impl<Signer: WriteableEcdsaChannelSigner> Channel<Signer> {
} else {
log_debug!(logger, "Received a valid revoke_and_ack for channel {} with no reply necessary.", log_bytes!(self.channel_id()));
self.monitor_updating_paused(false, false, false, to_forward_infos, revoked_htlcs, finalized_claimed_htlcs);
let fly_monitor = self.pending_monitor_updates.iter().all(|upd| upd.flown);
let fly_monitor = self.pending_monitor_updates.iter().all(|upd| upd.flown) && !hold_mon_update;
self.pending_monitor_updates.push(PendingChannelMonitorUpdate {
update: monitor_update, flown: fly_monitor,
});
60 changes: 38 additions & 22 deletions lightning/src/ln/channelmanager.rs
@@ -918,7 +918,11 @@ where
///
/// Note that events MUST NOT be removed from pending_events without also holding the
/// `pending_events_processor` lock.
#[cfg(not(any(test, feature = "_test_utils")))]
pending_events: Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>,
#[cfg(any(test, feature = "_test_utils"))]
pub(crate) pending_events: Mutex<VecDeque<(events::Event, Option<EventCompletionAction>)>>,

/// A simple mutex to ensure only one thread can be processing events at a time.
pending_events_processor: Mutex<()>,
/// See `ChannelManager` struct-level documentation for lock order requirements.
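
A toy model of the protocol those two fields describe (assumed for
illustration; not LDK's actual implementation):

    use std::collections::VecDeque;
    use std::sync::Mutex;

    struct EventQueue<E, A> {
        pending_events: Mutex<VecDeque<(E, Option<A>)>>,
        pending_events_processor: Mutex<()>,
    }

    impl<E, A> EventQueue<E, A> {
        fn process_events(&self, mut handle: impl FnMut(E), mut complete: impl FnMut(A)) {
            // Only one thread at a time may drain the queue...
            let _guard = self.pending_events_processor.lock().unwrap();
            loop {
                // ...and events are only removed while that guard is held, so the
                // completion action (e.g. releasing a held ChannelMonitorUpdate)
                // always runs before the event can be observed as handled.
                let next = self.pending_events.lock().unwrap().pop_front();
                match next {
                    Some((event, action)) => {
                        handle(event);
                        if let Some(action) = action { complete(action); }
                    },
                    None => break,
                }
            }
        }
    }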
@@ -4180,10 +4184,16 @@ where
self.pending_outbound_payments.finalize_claims(sources, &self.pending_events);
}

fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_id: [u8; 32]) {
fn claim_funds_internal(&self, source: HTLCSource, payment_preimage: PaymentPreimage, forwarded_htlc_value_msat: Option<u64>, from_onchain: bool, next_channel_outpoint: OutPoint) {
match source {
HTLCSource::OutboundRoute { session_priv, payment_id, path, .. } => {
self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage, session_priv, path, from_onchain, &self.pending_events, &self.logger);
let ev_completion_action = EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
channel_funding_outpoint: next_channel_outpoint,
counterparty_node_id: path[0].pubkey,
};
self.pending_outbound_payments.claim_htlc(payment_id, payment_preimage,
session_priv, path, from_onchain, ev_completion_action, &self.pending_events,
&self.logger);
},
HTLCSource::PreviousHopData(hop_data) => {
let prev_outpoint = hop_data.outpoint;
@@ -4194,14 +4204,11 @@ where
Some(claimed_htlc_value - forwarded_htlc_value)
} else { None };

let prev_channel_id = Some(prev_outpoint.to_channel_id());
let next_channel_id = Some(next_channel_id);

Some(MonitorUpdateCompletionAction::EmitEvent { event: events::Event::PaymentForwarded {
fee_earned_msat,
claim_from_onchain_tx: from_onchain,
prev_channel_id,
next_channel_id,
prev_channel_id: Some(prev_outpoint.to_channel_id()),
next_channel_id: Some(next_channel_outpoint.to_channel_id()),
}})
} else { None }
});
@@ -4889,6 +4896,7 @@ where
}

fn internal_update_fulfill_htlc(&self, counterparty_node_id: &PublicKey, msg: &msgs::UpdateFulfillHTLC) -> Result<(), MsgHandleErrInternal> {
let funding_txo;
let (htlc_source, forwarded_htlc_value) = {
let per_peer_state = self.per_peer_state.read().unwrap();
let peer_state_mutex = per_peer_state.get(counterparty_node_id)
@@ -4900,12 +4908,14 @@ where
let peer_state = &mut *peer_state_lock;
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), chan)
let res = try_chan_entry!(self, chan.get_mut().update_fulfill_htlc(&msg), chan);
funding_txo = chan.get().get_funding_txo().expect("We won't accept a fulfill until funded");
res
},
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
}
};
self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, msg.channel_id);
self.claim_funds_internal(htlc_source, msg.payment_preimage.clone(), Some(forwarded_htlc_value), false, funding_txo);
Ok(())
}

@@ -5082,7 +5092,14 @@ where
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
let funding_txo = chan.get().get_funding_txo();
let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self, chan.get_mut().revoke_and_ack(&msg, &self.logger), chan);
let mon_update_blocked = self.pending_events.lock().unwrap().iter().any(|(_, action)| {
action == &Some(EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
channel_funding_outpoint: funding_txo.expect("We won't accept an RAA until funded"),
counterparty_node_id: *counterparty_node_id,
})
});
let (htlcs_to_fail, monitor_update_opt) = try_chan_entry!(self,
chan.get_mut().revoke_and_ack(&msg, &self.logger, mon_update_blocked), chan);
let res = if let Some(monitor_update) = monitor_update_opt {
let update_res = self.chain_monitor.update_channel(funding_txo.unwrap(), monitor_update);
let update_id = monitor_update.update_id;
@@ -5261,7 +5278,7 @@ where
MonitorEvent::HTLCEvent(htlc_update) => {
if let Some(preimage) = htlc_update.payment_preimage {
log_trace!(self.logger, "Claiming HTLC with preimage {} from our monitor", log_bytes!(preimage.0));
self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint.to_channel_id());
self.claim_funds_internal(htlc_update.source, preimage, htlc_update.htlc_value_satoshis.map(|v| v * 1000), true, funding_outpoint);
} else {
log_trace!(self.logger, "Failing HTLC with hash {} from our monitor", log_bytes!(htlc_update.payment_hash.0));
let receiver = HTLCDestination::NextHopChannel { node_id: counterparty_node_id, channel_id: funding_outpoint.to_channel_id() };
@@ -7842,7 +7859,13 @@ where
// generating a `PaymentPathSuccessful` event but regenerating
// it and the `PaymentSent` on every restart until the
// `ChannelMonitor` is removed.
pending_outbounds.claim_htlc(payment_id, preimage, session_priv, path, false, &pending_events, &args.logger);
let compl_action =
EventCompletionAction::ReleaseRAAChannelMonitorUpdate {
channel_funding_outpoint: monitor.get_funding_txo().0,
counterparty_node_id: path[0].pubkey,
};
pending_outbounds.claim_htlc(payment_id, preimage, session_priv,
path, false, compl_action, &pending_events, &args.logger);
pending_events_read = pending_events.into_inner().unwrap();
}
},
@@ -8237,6 +8260,7 @@ mod tests {

let bs_first_updates = get_htlc_update_msgs!(nodes[1], nodes[0].node.get_our_node_id());
nodes[0].node.handle_update_fulfill_htlc(&nodes[1].node.get_our_node_id(), &bs_first_updates.update_fulfill_htlcs[0]);
expect_payment_sent(&nodes[0], payment_preimage, None, false, false);
nodes[0].node.handle_commitment_signed(&nodes[1].node.get_our_node_id(), &bs_first_updates.commitment_signed);
check_added_monitors!(nodes[0], 1);
let (as_first_raa, as_first_cs) = get_revoke_commit_msgs!(nodes[0], nodes[1].node.get_our_node_id());
@@ -8264,24 +8288,16 @@ mod tests {
// Note that successful MPP payments will generate a single PaymentSent event upon the first
// path's success and a PaymentPathSuccessful event for each path's success.
let events = nodes[0].node.get_and_clear_pending_events();
assert_eq!(events.len(), 3);
assert_eq!(events.len(), 2);
match events[0] {
Event::PaymentSent { payment_id: ref id, payment_preimage: ref preimage, payment_hash: ref hash, .. } => {
assert_eq!(Some(payment_id), *id);
assert_eq!(payment_preimage, *preimage);
assert_eq!(our_payment_hash, *hash);
},
_ => panic!("Unexpected event"),
}
match events[1] {
Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path } => {
assert_eq!(payment_id, *actual_payment_id);
assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap());
assert_eq!(route.paths[0], *path);
},
_ => panic!("Unexpected event"),
}
match events[2] {
match events[1] {
Event::PaymentPathSuccessful { payment_id: ref actual_payment_id, ref payment_hash, ref path } => {
assert_eq!(payment_id, *actual_payment_id);
assert_eq!(our_payment_hash, *payment_hash.as_ref().unwrap());