From 396c36b6ea2335acabd297eb7ed1acaac69a80ea Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 29 Dec 2023 00:45:07 +0000 Subject: [PATCH 1/6] Consider `MONITOR_UPDATE_IN_PROGRESS` as unbroadcasted funding If we promote our channel to `AwaitingChannelReady` after adding funding info, but still have `MONITOR_UPDATE_IN_PROGRESS` set, we haven't broadcasted the funding transaction yet and thus should return values from `unbroadcasted_funding[_txid]` and generate a `DiscardFunding` event. --- lightning/src/ln/channel.rs | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 588995656ec..1f50c76147d 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -2316,15 +2316,17 @@ impl ChannelContext where SP::Target: SignerProvider { res } - fn if_unbroadcasted_funding(&self, f: F) -> Option - where F: Fn() -> Option { + fn if_unbroadcasted_funding(&self, f: F) -> Option where F: Fn() -> Option { match self.channel_state { ChannelState::FundingNegotiated => f(), - ChannelState::AwaitingChannelReady(flags) => if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) { - f() - } else { - None - }, + ChannelState::AwaitingChannelReady(flags) => + if flags.is_set(AwaitingChannelReadyFlags::WAITING_FOR_BATCH) || + flags.is_set(FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS.into()) + { + f() + } else { + None + }, _ => None, } } From ee5b8c7003af296bc8189b0b6075889a106513c5 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 29 Dec 2023 03:15:18 +0000 Subject: [PATCH 2/6] Move DiscardFunding generation into finish_close_channel Currently the channel shutdown sequence has a number of steps which all the shutdown callsites have to call. Because many shutdown cases are rare error cases, it's relatively easy to miss a call and leave users without `Event`s or skip some important cleanup. One of those steps, calling `issue_channel_close_events`, is rather easy to remove, as it only generates two events, which can simply be moved to another shutdown step. Here we move the first of the two events, `DiscardFunding`, into `finish_close_channel`.
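To make the shape of the change concrete, here is a minimal, self-contained Rust sketch of the behaviour patches 1 and 2 converge on; the types below (`ChannelState`, `Channel`, `Event`, `finish_close_channel`) are simplified stand-ins for illustration, not LDK's real definitions. Funding that is still waiting on the rest of a batch, or on its initial monitor update, counts as unbroadcasted, and the single close-finishing path emits `DiscardFunding` from exactly that information:

    // Simplified stand-ins, not LDK's real types; just the shape of the logic.
    #[derive(Debug)]
    enum Event {
        DiscardFunding { channel_id: u64, transaction: Vec<u8> },
    }

    #[allow(dead_code)]
    enum ChannelState {
        FundingNegotiated,
        // In LDK these are bit flags on AwaitingChannelReady; bools keep the sketch small.
        AwaitingChannelReady { waiting_for_batch: bool, monitor_update_in_progress: bool },
        ChannelReady,
    }

    struct Channel {
        channel_id: u64,
        state: ChannelState,
        funding_tx: Option<Vec<u8>>,
    }

    impl Channel {
        // Patch 1: funding also counts as unbroadcasted while the initial monitor
        // update is still in flight, not only while waiting for the rest of a batch.
        fn unbroadcasted_funding(&self) -> Option<Vec<u8>> {
            match &self.state {
                ChannelState::FundingNegotiated => self.funding_tx.clone(),
                ChannelState::AwaitingChannelReady { waiting_for_batch, monitor_update_in_progress } =>
                    if *waiting_for_batch || *monitor_update_in_progress {
                        self.funding_tx.clone()
                    } else {
                        None
                    },
                ChannelState::ChannelReady => None,
            }
        }
    }

    // Patch 2: a single close-finishing path decides whether to tell the user to
    // discard the funding transaction, so no shutdown callsite can forget it.
    fn finish_close_channel(chan: &Channel, pending_events: &mut Vec<Event>) {
        if let Some(transaction) = chan.unbroadcasted_funding() {
            pending_events.push(Event::DiscardFunding { channel_id: chan.channel_id, transaction });
        }
    }

    fn main() {
        let chan = Channel {
            channel_id: 42,
            state: ChannelState::AwaitingChannelReady {
                waiting_for_batch: false,
                monitor_update_in_progress: true,
            },
            funding_tx: Some(vec![0x01, 0x02, 0x03]),
        };
        let mut pending_events = Vec::new();
        finish_close_channel(&chan, &mut pending_events);
        // Prints one DiscardFunding event, since the funding tx was never broadcast.
        println!("{:?}", pending_events);
    }

The real patches key off `FundedStateFlags::MONITOR_UPDATE_IN_PROGRESS` inside `if_unbroadcasted_funding` and carry the transaction through `ShutdownResult`, but the invariant is the same: one function owns close-time event generation.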
--- lightning/src/ln/channel.rs | 5 +++++ lightning/src/ln/channelmanager.rs | 17 +++++++++-------- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 1f50c76147d..dee972f39ec 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -823,6 +823,7 @@ pub(crate) struct ShutdownResult { pub(crate) unbroadcasted_batch_funding_txid: Option, pub(crate) channel_id: ChannelId, pub(crate) counterparty_node_id: PublicKey, + pub(crate) unbroadcasted_funding_tx: Option, } /// If the majority of the channels funds are to the fundee and the initiator holds only just @@ -2402,6 +2403,7 @@ impl ChannelContext where SP::Target: SignerProvider { } else { None } } else { None }; let unbroadcasted_batch_funding_txid = self.unbroadcasted_batch_funding_txid(); + let unbroadcasted_funding_tx = self.unbroadcasted_funding(); self.channel_state = ChannelState::ShutdownComplete; self.update_time_counter += 1; @@ -2411,6 +2413,7 @@ impl ChannelContext where SP::Target: SignerProvider { unbroadcasted_batch_funding_txid, channel_id: self.channel_id, counterparty_node_id: self.counterparty_node_id, + unbroadcasted_funding_tx, } } @@ -4943,6 +4946,7 @@ impl Channel where unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(), channel_id: self.context.channel_id, counterparty_node_id: self.context.counterparty_node_id, + unbroadcasted_funding_tx: self.context.unbroadcasted_funding(), }; let tx = self.build_signed_closing_transaction(&mut closing_tx, &msg.signature, &sig); self.context.channel_state = ChannelState::ShutdownComplete; @@ -4973,6 +4977,7 @@ impl Channel where unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(), channel_id: self.context.channel_id, counterparty_node_id: self.context.counterparty_node_id, + unbroadcasted_funding_tx: self.context.unbroadcasted_funding(), }; self.context.channel_state = ChannelState::ShutdownComplete; self.context.update_time_counter += 1; diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index b2b28cdf365..51ea72ff418 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -2704,14 +2704,6 @@ where /// Helper function that issues the channel close events fn issue_channel_close_events(&self, context: &ChannelContext, closure_reason: ClosureReason) { let mut pending_events_lock = self.pending_events.lock().unwrap(); - match context.unbroadcasted_funding() { - Some(transaction) => { - pending_events_lock.push_back((events::Event::DiscardFunding { - channel_id: context.channel_id(), transaction - }, None)); - }, - None => {}, - } pending_events_lock.push_back((events::Event::ChannelClosed { channel_id: context.channel_id(), user_channel_id: context.get_user_id(), @@ -2897,6 +2889,15 @@ where "Closing a batch where all channels have completed initial monitor update", ); } + + { + let mut pending_events = self.pending_events.lock().unwrap(); + if let Some(transaction) = shutdown_res.unbroadcasted_funding_tx { + pending_events.push_back((events::Event::DiscardFunding { + channel_id: shutdown_res.channel_id, transaction + }, None)); + } + } for shutdown_result in shutdown_results.drain(..) 
{ self.finish_close_channel(shutdown_result); } From 080865dff975a57cc9b88a7678eee2f963847bc4 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 29 Dec 2023 03:23:59 +0000 Subject: [PATCH 3/6] Move ChannelClosed generation into finish_close_channel Currently the channel shutdown sequence has a number of steps which all the shutdown callsites have to call. Because many shutdown cases are rare error cases, it's relatively easy to miss a call and leave users without `Event`s or skip some important cleanup. One of those steps, calling `issue_channel_close_events`, is rather easy to remove, as it only generates two events, which can simply be moved to another shutdown step. Here we remove `issue_channel_close_events` by moving `ChannelClosed` event generation into `finish_close_channel`. --- lightning/src/ln/channel.rs | 14 +++++- lightning/src/ln/channelmanager.rs | 74 ++++++++++------------------ lightning/src/ln/functional_tests.rs | 34 ++++++------- 3 files changed, 55 insertions(+), 67 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index dee972f39ec..7026ab6d376 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -814,6 +814,7 @@ pub(super) struct ReestablishResponses { /// The result of a shutdown that should be handled. #[must_use] pub(crate) struct ShutdownResult { + pub(crate) closure_reason: ClosureReason, /// A channel monitor update to apply. pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>, /// A list of dropped outbound HTLCs that can safely be failed backwards immediately. @@ -822,6 +823,8 @@ pub(crate) struct ShutdownResult { /// propagated to the remainder of the batch. pub(crate) unbroadcasted_batch_funding_txid: Option, pub(crate) channel_id: ChannelId, + pub(crate) user_channel_id: u128, + pub(crate) channel_capacity_satoshis: u64, pub(crate) counterparty_node_id: PublicKey, pub(crate) unbroadcasted_funding_tx: Option, } /// If the majority of the channels funds are to the fundee and the initiator holds only just @@ -2362,7 +2365,7 @@ impl ChannelContext where SP::Target: SignerProvider { /// those explicitly stated to be allowed after shutdown completes, eg some simple getters). /// Also returns the list of payment_hashes for channels which we can safely fail backwards /// immediately (others we will have to allow to time out). - pub fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult { + pub fn force_shutdown(&mut self, should_broadcast: bool, closure_reason: ClosureReason) -> ShutdownResult { // Note that we MUST only generate a monitor update that indicates force-closure - we're // called during initialization prior to the chain_monitor in the encompassing ChannelManager // being fully configured in some cases.
Thus, its likely any monitor events we generate will @@ -2408,10 +2411,13 @@ impl ChannelContext where SP::Target: SignerProvider { self.channel_state = ChannelState::ShutdownComplete; self.update_time_counter += 1; ShutdownResult { + closure_reason, monitor_update, dropped_outbound_htlcs, unbroadcasted_batch_funding_txid, channel_id: self.channel_id, + user_channel_id: self.user_id, + channel_capacity_satoshis: self.channel_value_satoshis, counterparty_node_id: self.counterparty_node_id, unbroadcasted_funding_tx, } @@ -4941,10 +4947,13 @@ impl Channel where if let Some((last_fee, sig)) = self.context.last_sent_closing_fee { if last_fee == msg.fee_satoshis { let shutdown_result = ShutdownResult { + closure_reason: ClosureReason::CooperativeClosure, monitor_update: None, dropped_outbound_htlcs: Vec::new(), unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(), channel_id: self.context.channel_id, + user_channel_id: self.context.user_id, + channel_capacity_satoshis: self.context.channel_value_satoshis, counterparty_node_id: self.context.counterparty_node_id, unbroadcasted_funding_tx: self.context.unbroadcasted_funding(), }; @@ -4972,10 +4981,13 @@ impl Channel where .map_err(|_| ChannelError::Close("External signer refused to sign closing transaction".to_owned()))?; let (signed_tx, shutdown_result) = if $new_fee == msg.fee_satoshis { let shutdown_result = ShutdownResult { + closure_reason: ClosureReason::CooperativeClosure, monitor_update: None, dropped_outbound_htlcs: Vec::new(), unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(), channel_id: self.context.channel_id, + user_channel_id: self.context.user_id, + channel_capacity_satoshis: self.context.channel_value_satoshis, counterparty_node_id: self.context.counterparty_node_id, unbroadcasted_funding_tx: self.context.unbroadcasted_funding(), }; diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 51ea72ff418..f19878f5170 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -1966,14 +1966,6 @@ macro_rules! handle_error { msg: update }); } - if let Some((channel_id, user_channel_id)) = chan_id { - $self.pending_events.lock().unwrap().push_back((events::Event::ChannelClosed { - channel_id, user_channel_id, - reason: ClosureReason::ProcessingError { err: err.err.clone() }, - counterparty_node_id: Some($counterparty_node_id), - channel_capacity_sats: channel_capacity, - }, None)); - } } let logger = WithContext::from( @@ -2039,7 +2031,8 @@ macro_rules! 
convert_chan_phase_err { let logger = WithChannelContext::from(&$self.logger, &$channel.context); log_error!(logger, "Closing channel {} due to close-required error: {}", $channel_id, msg); update_maps_on_chan_removal!($self, $channel.context); - let shutdown_res = $channel.context.force_shutdown(true); + let reason = ClosureReason::ProcessingError { err: msg.clone() }; + let shutdown_res = $channel.context.force_shutdown(true, reason); let user_id = $channel.context.get_user_id(); let channel_capacity_satoshis = $channel.context.get_value_satoshis(); @@ -2701,18 +2694,6 @@ where .collect() } - /// Helper function that issues the channel close events - fn issue_channel_close_events(&self, context: &ChannelContext, closure_reason: ClosureReason) { - let mut pending_events_lock = self.pending_events.lock().unwrap(); - pending_events_lock.push_back((events::Event::ChannelClosed { - channel_id: context.channel_id(), - user_channel_id: context.get_user_id(), - reason: closure_reason, - counterparty_node_id: Some(context.get_counterparty_node_id()), - channel_capacity_sats: Some(context.get_value_satoshis()), - }, None)); - } - fn close_channel_internal(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option, override_shutdown_script: Option) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); @@ -2754,9 +2735,8 @@ where peer_state_lock, peer_state, per_peer_state, chan); } } else { - self.issue_channel_close_events(chan_phase_entry.get().context(), ClosureReason::HolderForceClosed); let mut chan_phase = remove_channel_phase!(self, chan_phase_entry); - shutdown_result = Some(chan_phase.context_mut().force_shutdown(false)); + shutdown_result = Some(chan_phase.context_mut().force_shutdown(false, ClosureReason::HolderForceClosed)); } }, hash_map::Entry::Vacant(_) => { @@ -2853,6 +2833,7 @@ where let logger = WithContext::from( &self.logger, Some(shutdown_res.counterparty_node_id), Some(shutdown_res.channel_id), ); + log_debug!(logger, "Finishing closure of channel with {} HTLCs to fail", shutdown_res.dropped_outbound_htlcs.len()); for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) 
{ let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source; @@ -2878,8 +2859,7 @@ where let mut peer_state = peer_state_mutex.lock().unwrap(); if let Some(mut chan) = peer_state.channel_by_id.remove(&channel_id) { update_maps_on_chan_removal!(self, &chan.context()); - self.issue_channel_close_events(&chan.context(), ClosureReason::FundingBatchClosure); - shutdown_results.push(chan.context_mut().force_shutdown(false)); + shutdown_results.push(chan.context_mut().force_shutdown(false, ClosureReason::FundingBatchClosure)); } } has_uncompleted_channel = Some(has_uncompleted_channel.map_or(!state, |v| v || !state)); @@ -2892,6 +2872,14 @@ where { let mut pending_events = self.pending_events.lock().unwrap(); + pending_events.push_back((events::Event::ChannelClosed { + channel_id: shutdown_res.channel_id, + user_channel_id: shutdown_res.user_channel_id, + reason: shutdown_res.closure_reason, + counterparty_node_id: Some(shutdown_res.counterparty_node_id), + channel_capacity_sats: Some(shutdown_res.channel_capacity_satoshis), + }, None)); + if let Some(transaction) = shutdown_res.unbroadcasted_funding_tx { pending_events.push_back((events::Event::DiscardFunding { channel_id: shutdown_res.channel_id, transaction @@ -2920,17 +2908,16 @@ where let logger = WithContext::from(&self.logger, Some(*peer_node_id), Some(*channel_id)); if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(channel_id.clone()) { log_error!(logger, "Force-closing channel {}", channel_id); - self.issue_channel_close_events(&chan_phase_entry.get().context(), closure_reason); let mut chan_phase = remove_channel_phase!(self, chan_phase_entry); mem::drop(peer_state); mem::drop(per_peer_state); match chan_phase { ChannelPhase::Funded(mut chan) => { - self.finish_close_channel(chan.context.force_shutdown(broadcast)); + self.finish_close_channel(chan.context.force_shutdown(broadcast, closure_reason)); (self.get_channel_update_for_broadcast(&chan).ok(), chan.context.get_counterparty_node_id()) }, ChannelPhase::UnfundedOutboundV1(_) | ChannelPhase::UnfundedInboundV1(_) => { - self.finish_close_channel(chan_phase.context_mut().force_shutdown(false)); + self.finish_close_channel(chan_phase.context_mut().force_shutdown(false, closure_reason)); // Unfunded channel has no update (None, chan_phase.context().get_counterparty_node_id()) }, @@ -3760,7 +3747,8 @@ where .map_err(|(mut chan, e)| if let ChannelError::Close(msg) = e { let channel_id = chan.context.channel_id(); let user_id = chan.context.get_user_id(); - let shutdown_res = chan.context.force_shutdown(false); + let reason = ClosureReason::ProcessingError { err: msg.clone() }; + let shutdown_res = chan.context.force_shutdown(false, reason); let channel_capacity = chan.context.get_value_satoshis(); (chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, user_id, shutdown_res, None, channel_capacity)) } else { unreachable!(); }); @@ -3967,8 +3955,8 @@ where .and_then(|mut peer_state| peer_state.channel_by_id.remove(&channel_id)) .map(|mut chan| { update_maps_on_chan_removal!(self, &chan.context()); - self.issue_channel_close_events(&chan.context(), ClosureReason::ProcessingError { err: e.clone() }); - shutdown_results.push(chan.context_mut().force_shutdown(false)); + let closure_reason = ClosureReason::ProcessingError { err: e.clone() }; + shutdown_results.push(chan.context_mut().force_shutdown(false, closure_reason)); }); } } @@ -4890,8 +4878,7 @@ where log_error!(logger, "Force-closing pending channel with ID {} for not 
establishing in a timely manner", chan_id); update_maps_on_chan_removal!(self, &context); - self.issue_channel_close_events(&context, ClosureReason::HolderForceClosed); - shutdown_channels.push(context.force_shutdown(false)); + shutdown_channels.push(context.force_shutdown(false, ClosureReason::HolderForceClosed)); pending_msg_events.push(MessageSendEvent::HandleError { node_id: counterparty_node_id, action: msgs::ErrorAction::SendErrorMessage { @@ -6511,9 +6498,8 @@ where let context = phase.context_mut(); let logger = WithChannelContext::from(&self.logger, context); log_error!(logger, "Immediately closing unfunded channel {} as peer asked to cooperatively shut it down (which is unnecessary)", &msg.channel_id); - self.issue_channel_close_events(&context, ClosureReason::CounterpartyCoopClosedUnfundedChannel); let mut chan = remove_channel_phase!(self, chan_phase_entry); - finish_shutdown = Some(chan.context_mut().force_shutdown(false)); + finish_shutdown = Some(chan.context_mut().force_shutdown(false, ClosureReason::CounterpartyCoopClosedUnfundedChannel)); }, } } else { @@ -6582,7 +6568,6 @@ where msg: update }); } - self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure); } mem::drop(per_peer_state); if let Some(shutdown_result) = shutdown_result { @@ -7234,13 +7219,12 @@ where let pending_msg_events = &mut peer_state.pending_msg_events; if let hash_map::Entry::Occupied(chan_phase_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) { if let ChannelPhase::Funded(mut chan) = remove_channel_phase!(self, chan_phase_entry) { - failed_channels.push(chan.context.force_shutdown(false)); + failed_channels.push(chan.context.force_shutdown(false, ClosureReason::HolderForceClosed)); if let Ok(update) = self.get_channel_update_for_broadcast(&chan) { pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); } - self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed); pending_msg_events.push(events::MessageSendEvent::HandleError { node_id: chan.context.get_counterparty_node_id(), action: msgs::ErrorAction::DisconnectPeer { @@ -7427,8 +7411,6 @@ where }); } - self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure); - log_info!(logger, "Broadcasting {}", log_tx!(tx)); self.tx_broadcaster.broadcast_transactions(&[&tx]); update_maps_on_chan_removal!(self, &chan.context); @@ -8441,14 +8423,13 @@ where update_maps_on_chan_removal!(self, &channel.context); // It looks like our counterparty went on-chain or funding transaction was // reorged out of the main chain. Close the channel. - failed_channels.push(channel.context.force_shutdown(true)); + let reason_message = format!("{}", reason); + failed_channels.push(channel.context.force_shutdown(true, reason)); if let Ok(update) = self.get_channel_update_for_broadcast(&channel) { pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); } - let reason_message = format!("{}", reason); - self.issue_channel_close_events(&channel.context, reason); pending_msg_events.push(events::MessageSendEvent::HandleError { node_id: channel.context.get_counterparty_node_id(), action: msgs::ErrorAction::DisconnectPeer { @@ -8846,8 +8827,7 @@ where }; // Clean up for removal. 
update_maps_on_chan_removal!(self, &context); - self.issue_channel_close_events(&context, ClosureReason::DisconnectedPeer); - failed_channels.push(context.force_shutdown(false)); + failed_channels.push(context.force_shutdown(false, ClosureReason::DisconnectedPeer)); false }); // Note that we don't bother generating any events for pre-accept channels - @@ -10293,7 +10273,7 @@ where log_error!(logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.", &channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number()); } - let mut shutdown_result = channel.context.force_shutdown(true); + let mut shutdown_result = channel.context.force_shutdown(true, ClosureReason::OutdatedChannelManager); if shutdown_result.unbroadcasted_batch_funding_txid.is_some() { return Err(DecodeError::InvalidValue); } @@ -10355,7 +10335,7 @@ where // If we were persisted and shut down while the initial ChannelMonitor persistence // was in-progress, we never broadcasted the funding transaction and can still // safely discard the channel. - let _ = channel.context.force_shutdown(false); + let _ = channel.context.force_shutdown(false, ClosureReason::DisconnectedPeer); channel_closures.push_back((events::Event::ChannelClosed { channel_id: channel.context.channel_id(), user_channel_id: channel.context.get_user_id(), diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 2ad53faa8b2..ef0fcc92671 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -3328,22 +3328,18 @@ fn do_test_commitment_revoked_fail_backward_exhaustive(deliver_bs_raa: bool, use let events = nodes[1].node.get_and_clear_pending_events(); assert_eq!(events.len(), if deliver_bs_raa { 3 + nodes.len() - 1 } else { 4 + nodes.len() }); - match events[0] { - Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } => { }, - _ => panic!("Unexepected event"), - } - match events[1] { - Event::PaymentPathFailed { ref payment_hash, .. } => { - assert_eq!(*payment_hash, fourth_payment_hash); - }, - _ => panic!("Unexpected event"), - } - match events[2] { - Event::PaymentFailed { ref payment_hash, .. } => { - assert_eq!(*payment_hash, fourth_payment_hash); - }, - _ => panic!("Unexpected event"), - } + assert!(events.iter().any(|ev| matches!( + ev, + Event::ChannelClosed { reason: ClosureReason::CommitmentTxConfirmed, .. } + ))); + assert!(events.iter().any(|ev| matches!( + ev, + Event::PaymentPathFailed { ref payment_hash, .. } if *payment_hash == fourth_payment_hash + ))); + assert!(events.iter().any(|ev| matches!( + ev, + Event::PaymentFailed { ref payment_hash, .. } if *payment_hash == fourth_payment_hash + ))); nodes[1].node.process_pending_htlc_forwards(); check_added_monitors!(nodes[1], 1); @@ -9131,16 +9127,16 @@ fn test_duplicate_chan_id() { chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap() }, _ => panic!("Unexpected ChannelPhase variant"), - } + }.unwrap() }; check_added_monitors!(nodes[0], 0); - nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created.unwrap()); + nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created); // At this point we'll look up if the channel_id is present and immediately fail the channel // without trying to persist the `ChannelMonitor`. 
check_added_monitors!(nodes[1], 0); check_closed_events(&nodes[1], &[ - ExpectedCloseEvent::from_id_reason(channel_id, false, ClosureReason::ProcessingError { + ExpectedCloseEvent::from_id_reason(funding_created.temporary_channel_id, false, ClosureReason::ProcessingError { err: "Already had channel with the new channel_id".to_owned() }) ]); From bfe911dadcd31ec0e3a2e866d3e154e9781498a4 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Fri, 29 Dec 2023 03:55:52 +0000 Subject: [PATCH 4/6] Drop now-unused fields from `MsgHandleErrInternal` --- lightning/src/ln/channelmanager.rs | 48 +++++++++++++--------------- lightning/src/ln/functional_tests.rs | 8 ++--- 2 files changed, 27 insertions(+), 29 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index f19878f5170..02852877d93 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -545,9 +545,8 @@ impl Into for FailureCode { struct MsgHandleErrInternal { err: msgs::LightningError, - chan_id: Option<(ChannelId, u128)>, // If Some a channel of ours has been closed + closes_channel: bool, shutdown_finish: Option<(ShutdownResult, Option)>, - channel_capacity: Option, } impl MsgHandleErrInternal { #[inline] @@ -562,17 +561,16 @@ impl MsgHandleErrInternal { }, }, }, - chan_id: None, + closes_channel: false, shutdown_finish: None, - channel_capacity: None, } } #[inline] fn from_no_close(err: msgs::LightningError) -> Self { - Self { err, chan_id: None, shutdown_finish: None, channel_capacity: None } + Self { err, closes_channel: false, shutdown_finish: None } } #[inline] - fn from_finish_shutdown(err: String, channel_id: ChannelId, user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option, channel_capacity: u64) -> Self { + fn from_finish_shutdown(err: String, channel_id: ChannelId, shutdown_res: ShutdownResult, channel_update: Option) -> Self { let err_msg = msgs::ErrorMessage { channel_id, data: err.clone() }; let action = if shutdown_res.monitor_update.is_some() { // We have a closing `ChannelMonitorUpdate`, which means the channel was funded and we @@ -584,9 +582,8 @@ impl MsgHandleErrInternal { }; Self { err: LightningError { err, action }, - chan_id: Some((channel_id, user_channel_id)), + closes_channel: true, shutdown_finish: Some((shutdown_res, channel_update)), - channel_capacity: Some(channel_capacity) } } #[inline] @@ -617,14 +614,13 @@ impl MsgHandleErrInternal { }, }, }, - chan_id: None, + closes_channel: false, shutdown_finish: None, - channel_capacity: None, } } fn closes_channel(&self) -> bool { - self.chan_id.is_some() + self.closes_channel } } @@ -1956,22 +1952,27 @@ macro_rules! handle_error { match $internal { Ok(msg) => Ok(msg), - Err(MsgHandleErrInternal { err, chan_id, shutdown_finish, channel_capacity }) => { + Err(MsgHandleErrInternal { err, shutdown_finish, .. 
}) => { let mut msg_events = Vec::with_capacity(2); if let Some((shutdown_res, update_option)) = shutdown_finish { + let counterparty_node_id = shutdown_res.counterparty_node_id; + let channel_id = shutdown_res.channel_id; + let logger = WithContext::from( + &$self.logger, Some(counterparty_node_id), Some(channel_id), + ); + log_error!(logger, "Force-closing channel: {}", err.err); + $self.finish_close_channel(shutdown_res); if let Some(update) = update_option { msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { msg: update }); } + } else { + log_error!($self.logger, "Got non-closing error: {}", err.err); } - let logger = WithContext::from( - &$self.logger, Some($counterparty_node_id), chan_id.map(|(chan_id, _)| chan_id) - ); - log_error!(logger, "{}", err.err); if let msgs::ErrorAction::IgnoreError = err.action { } else { msg_events.push(events::MessageSendEvent::HandleError { @@ -2033,11 +2034,9 @@ macro_rules! convert_chan_phase_err { update_maps_on_chan_removal!($self, $channel.context); let reason = ClosureReason::ProcessingError { err: msg.clone() }; let shutdown_res = $channel.context.force_shutdown(true, reason); - let user_id = $channel.context.get_user_id(); - let channel_capacity_satoshis = $channel.context.get_value_satoshis(); - - (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, user_id, - shutdown_res, $channel_update, channel_capacity_satoshis)) + let err = + MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, shutdown_res, $channel_update); + (true, err) }, } }; @@ -2834,7 +2833,8 @@ where &self.logger, Some(shutdown_res.counterparty_node_id), Some(shutdown_res.channel_id), ); - log_debug!(logger, "Finishing closure of channel with {} HTLCs to fail", shutdown_res.dropped_outbound_htlcs.len()); + log_debug!(logger, "Finishing closure of channel due to {} with {} HTLCs to fail", + shutdown_res.closure_reason, shutdown_res.dropped_outbound_htlcs.len()); for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) { let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source; let reason = HTLCFailReason::from_failure_code(0x4000 | 8); @@ -3746,11 +3746,9 @@ where let funding_res = chan.get_funding_created(funding_transaction, funding_txo, is_batch_funding, &&logger) .map_err(|(mut chan, e)| if let ChannelError::Close(msg) = e { let channel_id = chan.context.channel_id(); - let user_id = chan.context.get_user_id(); let reason = ClosureReason::ProcessingError { err: msg.clone() }; let shutdown_res = chan.context.force_shutdown(false, reason); - let channel_capacity = chan.context.get_value_satoshis(); - (chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, user_id, shutdown_res, None, channel_capacity)) + (chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, shutdown_res, None)) } else { unreachable!(); }); match funding_res { Ok(funding_msg) => (chan, funding_msg), diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index ef0fcc92671..f86eb53e283 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -768,7 +768,7 @@ fn test_update_fee_that_funder_cannot_afford() { //check to see if the funder, who sent the update_fee request, can afford the new fee (funder_balance >= fee+channel_reserve) //Should produce and error. 
nodes[1].node.handle_commitment_signed(&nodes[0].node.get_our_node_id(), &commit_signed_msg); - nodes[1].logger.assert_log("lightning::ln::channelmanager", "Funding remote cannot afford proposed new fee".to_string(), 1); + nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Funding remote cannot afford proposed new fee", 3); check_added_monitors!(nodes[1], 1); check_closed_broadcast!(nodes[1], true); check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: String::from("Funding remote cannot afford proposed new fee") }, @@ -1617,7 +1617,7 @@ fn test_chan_reserve_violation_inbound_htlc_outbound_channel() { nodes[0].node.handle_update_add_htlc(&nodes[1].node.get_our_node_id(), &msg); // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd. - nodes[0].logger.assert_log("lightning::ln::channelmanager", "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value".to_string(), 1); + nodes[0].logger.assert_log_contains("lightning::ln::channelmanager", "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value", 3); assert_eq!(nodes[0].node.list_channels().len(), 0); let err_msg = check_closed_broadcast!(nodes[0], true).unwrap(); assert_eq!(err_msg.data, "Cannot accept HTLC that would put our balance under counterparty-announced channel reserve value"); @@ -1796,7 +1796,7 @@ fn test_chan_reserve_violation_inbound_htlc_inbound_chan() { nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &msg); // Check that the payment failed and the channel is closed in response to the malicious UpdateAdd. - nodes[1].logger.assert_log("lightning::ln::channelmanager", "Remote HTLC add would put them under remote reserve value".to_string(), 1); + nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Remote HTLC add would put them under remote reserve value", 3); assert_eq!(nodes[1].node.list_channels().len(), 1); let err_msg = check_closed_broadcast!(nodes[1], true).unwrap(); assert_eq!(err_msg.data, "Remote HTLC add would put them under remote reserve value"); @@ -6295,7 +6295,7 @@ fn test_update_add_htlc_bolt2_receiver_zero_value_msat() { updates.update_add_htlcs[0].amount_msat = 0; nodes[1].node.handle_update_add_htlc(&nodes[0].node.get_our_node_id(), &updates.update_add_htlcs[0]); - nodes[1].logger.assert_log("lightning::ln::channelmanager", "Remote side tried to send a 0-msat HTLC".to_string(), 1); + nodes[1].logger.assert_log_contains("lightning::ln::channelmanager", "Remote side tried to send a 0-msat HTLC", 3); check_closed_broadcast!(nodes[1], true).unwrap(); check_added_monitors!(nodes[1], 1); check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "Remote side tried to send a 0-msat HTLC".to_string() }, From 5eea3058c921aa4c0739dc10565ee8d236302274 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Tue, 26 Dec 2023 18:16:51 +0000 Subject: [PATCH 5/6] Do not panic if a peer learns our funding info before we fund We'd previously assumed that LDK would receive `funding_transaction_generated` prior to our peer learning the txid and panicked if the peer tried to open a redundant channel to us with the same funding outpoint. While this assumption is generally safe, some users may have out-of-band protocols where they notify their LSP about a funding outpoint first, or this may be violated in the future with collaborative transaction construction protocols, i.e. the upcoming dual-funding protocol. 
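As a rough sketch of the approach (simplified types; `register_funding_outpoint`, `Outpoint`, and `PeerId` are hypothetical stand-ins rather than LDK APIs), the registration of a funding outpoint uses the map's entry API and turns an already-present entry into an ordinary error instead of a panic:

    use std::collections::HashMap;
    use std::collections::hash_map::Entry;

    // Hypothetical stand-ins so the sketch is self-contained; LDK keys its map by a
    // real transaction outpoint and a secp256k1 public key.
    type Outpoint = (String, u16); // (funding txid as hex, output index)
    type PeerId = u8;

    fn register_funding_outpoint(
        outpoint_to_peer: &mut HashMap<Outpoint, PeerId>,
        outpoint: Outpoint,
        peer: PeerId,
    ) -> Result<(), String> {
        match outpoint_to_peer.entry(outpoint) {
            Entry::Vacant(e) => {
                e.insert(peer);
                Ok(())
            },
            // Previously a duplicate here was treated as unreachable and panicked; a peer
            // that learned our funding outpoint out-of-band could therefore crash us.
            Entry::Occupied(o) => Err(format!(
                "An existing channel using outpoint {:?} is open with peer {}",
                o.key(),
                o.get(),
            )),
        }
    }

    fn main() {
        let mut outpoint_to_peer = HashMap::new();
        let outpoint = ("deadbeef".to_owned(), 0);
        assert!(register_funding_outpoint(&mut outpoint_to_peer, outpoint.clone(), 1).is_ok());
        // The second registration now fails cleanly instead of hitting a panic.
        let err = register_funding_outpoint(&mut outpoint_to_peer, outpoint, 2).unwrap_err();
        println!("{}", err);
    }

In the patch itself the collision is reported as `APIError::ChannelUnavailable` from `funding_transaction_generated` and the offending channel is force-closed via `finish_close_channel`, rather than bringing the node down.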
--- lightning/src/ln/channelmanager.rs | 18 ++++++-- lightning/src/ln/functional_test_utils.rs | 7 ++- lightning/src/ln/functional_tests.rs | 55 +++++++++++++++++++++++ 3 files changed, 76 insertions(+), 4 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 02852877d93..7fe99607528 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3738,7 +3738,7 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; let funding_txo; - let (chan, msg_opt) = match peer_state.channel_by_id.remove(temporary_channel_id) { + let (mut chan, msg_opt) = match peer_state.channel_by_id.remove(temporary_channel_id) { Some(ChannelPhase::UnfundedOutboundV1(mut chan)) => { funding_txo = find_funding_output(&chan, &funding_transaction)?; @@ -3788,8 +3788,20 @@ where }, hash_map::Entry::Vacant(e) => { let mut outpoint_to_peer = self.outpoint_to_peer.lock().unwrap(); - if outpoint_to_peer.insert(funding_txo, chan.context.get_counterparty_node_id()).is_some() { - panic!("outpoint_to_peer map already contained funding outpoint, which shouldn't be possible"); + match outpoint_to_peer.entry(funding_txo) { + hash_map::Entry::Vacant(e) => { e.insert(chan.context.get_counterparty_node_id()); }, + hash_map::Entry::Occupied(o) => { + let err = format!( + "An existing channel using outpoint {} is open with peer {}", + funding_txo, o.get() + ); + mem::drop(outpoint_to_peer); + mem::drop(peer_state_lock); + mem::drop(per_peer_state); + let reason = ClosureReason::ProcessingError { err: err.clone() }; + self.finish_close_channel(chan.context.force_shutdown(true, reason)); + return Err(APIError::ChannelUnavailable { err }); + } } e.insert(ChannelPhase::UnfundedOutboundV1(chan)); } diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index a2d9631716c..2adfe2f9379 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1218,7 +1218,7 @@ pub fn open_zero_conf_channel<'a, 'b, 'c, 'd>(initiator: &'a Node<'b, 'c, 'd>, r (tx, as_channel_ready.channel_id) } -pub fn create_chan_between_nodes_with_value_init<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, channel_value: u64, push_msat: u64) -> Transaction { +pub fn exchange_open_accept_chan<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, channel_value: u64, push_msat: u64) -> ChannelId { let create_chan_id = node_a.node.create_channel(node_b.node.get_our_node_id(), channel_value, push_msat, 42, None, None).unwrap(); let open_channel_msg = get_event_msg!(node_a, MessageSendEvent::SendOpenChannel, node_b.node.get_our_node_id()); assert_eq!(open_channel_msg.temporary_channel_id, create_chan_id); @@ -1238,6 +1238,11 @@ pub fn create_chan_between_nodes_with_value_init<'a, 'b, 'c>(node_a: &Node<'a, ' node_a.node.handle_accept_channel(&node_b.node.get_our_node_id(), &accept_channel_msg); assert_ne!(node_b.node.list_channels().iter().find(|channel| channel.channel_id == create_chan_id).unwrap().user_channel_id, 0); + create_chan_id +} + +pub fn create_chan_between_nodes_with_value_init<'a, 'b, 'c>(node_a: &Node<'a, 'b, 'c>, node_b: &Node<'a, 'b, 'c>, channel_value: u64, push_msat: u64) -> Transaction { + let create_chan_id = exchange_open_accept_chan(node_a, node_b, channel_value, push_msat); sign_funding_transaction(node_a, node_b, channel_value, create_chan_id) } diff --git a/lightning/src/ln/functional_tests.rs 
b/lightning/src/ln/functional_tests.rs index f86eb53e283..9777ad75d23 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -8977,6 +8977,61 @@ fn test_duplicate_temporary_channel_id_from_different_peers() { } } +#[test] +fn test_peer_funding_sidechannel() { + // Test that if a peer somehow learns which txid we'll use for our channel funding before we + // receive `funding_transaction_generated` the peer cannot cause us to crash. We'd previously + // assumed that LDK would receive `funding_transaction_generated` prior to our peer learning + // the txid and panicked if the peer tried to open a redundant channel to us with the same + // funding outpoint. + // + // While this assumption is generally safe, some users may have out-of-band protocols where + // they notify their LSP about a funding outpoint first, or this may be violated in the future + // with collaborative transaction construction protocols, i.e. dual-funding. + let chanmon_cfgs = create_chanmon_cfgs(3); + let node_cfgs = create_node_cfgs(3, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(3, &node_cfgs, &[None, None, None]); + let nodes = create_network(3, &node_cfgs, &node_chanmgrs); + + let temp_chan_id_ab = exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0); + let temp_chan_id_ca = exchange_open_accept_chan(&nodes[2], &nodes[0], 1_000_000, 0); + + let (_, tx, funding_output) = + create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42); + + let cs_funding_events = nodes[2].node.get_and_clear_pending_events(); + assert_eq!(cs_funding_events.len(), 1); + match cs_funding_events[0] { + Event::FundingGenerationReady { .. } => {} + _ => panic!("Unexpected event {:?}", cs_funding_events), + } + + nodes[2].node.funding_transaction_generated_unchecked(&temp_chan_id_ca, &nodes[0].node.get_our_node_id(), tx.clone(), funding_output.index).unwrap(); + let funding_created_msg = get_event_msg!(nodes[2], MessageSendEvent::SendFundingCreated, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_funding_created(&nodes[2].node.get_our_node_id(), &funding_created_msg); + get_event_msg!(nodes[0], MessageSendEvent::SendFundingSigned, nodes[2].node.get_our_node_id()); + expect_channel_pending_event(&nodes[0], &nodes[2].node.get_our_node_id()); + check_added_monitors!(nodes[0], 1); + + let res = nodes[0].node.funding_transaction_generated(&temp_chan_id_ab, &nodes[1].node.get_our_node_id(), tx.clone()); + let err_msg = format!("{:?}", res.unwrap_err()); + assert!(err_msg.contains("An existing channel using outpoint ")); + assert!(err_msg.contains(" is open with peer")); + // Even though the last funding_transaction_generated errored, it still generated a + // SendFundingCreated. However, when the peer responds with a funding_signed it will send the + // appropriate error message. 
+ let as_funding_created = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &as_funding_created); + check_added_monitors!(nodes[1], 1); + expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); + let reason = ClosureReason::ProcessingError { err: format!("An existing channel using outpoint {} is open with peer {}", funding_output, nodes[2].node.get_our_node_id()), }; + check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(funding_output.to_channel_id(), true, reason)]); + + let funding_signed = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); + nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed); + get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id()); +} + #[test] fn test_duplicate_funding_err_in_funding() { // Test that if we have a live channel with one peer, then another peer comes along and tries From 7bc2a148b2525913c9c6e70fe15449fe4835ff2e Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Mon, 25 Dec 2023 06:55:08 +0000 Subject: [PATCH 6/6] Fix handling of duplicate initial `ChannelMonitor` writing In e06484b0f44155e647ff29810d2f187967e45813, we added specific handling for outbound-channel initial monitor updates failing - in such a case we have a counterparty who tried to open a second channel with the same funding info we just gave them, causing us to force-close our outbound channel as it shows up as duplicate-funding. It's largely harmless, as it leads to a spurious force-closure of a channel with a peer doing something absurd; however, it causes the `full_stack_target` fuzzer to fail. Sadly, in 574c77e7bc95fd8dea5a8058b6b35996cc99db8d, as we were dropping `PermanentFailure` handling for updates, we accidentally dropped handling for initial updates as well. Here we fix the issue (again) and add a test. --- lightning/src/ln/channelmanager.rs | 7 ++++- lightning/src/ln/functional_tests.rs | 40 ++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 1 deletion(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 7fe99607528..81b9d766d7b 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -6374,7 +6374,7 @@ where let res = chan.funding_signed(&msg, best_block, &self.signer_provider, &&logger); match res { - Ok((chan, monitor)) => { + Ok((mut chan, monitor)) => { if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) { // We really should be able to insert here without doing a second // lookup, but sadly rust stdlib doesn't currently allow keeping @@ -6386,6 +6386,11 @@ where Ok(()) } else { let e = ChannelError::Close("Channel funding outpoint was a duplicate".to_owned()); + // We weren't able to watch the channel to begin with, so no + // updates should be made on it. Previously, full_stack_target + // found an (unreachable) panic when the monitor update contained + // within `shutdown_finish` was applied.
+ chan.unset_funding_info(msg.channel_id); return Err(convert_chan_phase_err!(self, e, &mut ChannelPhase::Funded(chan), &msg.channel_id).1); } }, diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 9777ad75d23..78207c57b45 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -9032,6 +9032,46 @@ fn test_peer_funding_sidechannel() { get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id()); } +#[test] +fn test_duplicate_conflicting_funding_from_second_peer() { + // Test that if a user tries to fund a channel with a funding outpoint they'd previously used + // we don't try to remove the previous ChannelMonitor. This is largely a test to ensure we + // don't regress in the fuzzer, as such funding getting passed our outpoint-matches checks + // implies the user (and our counterparty) has reused cryptographic keys across channels, which + // we require the user not do. + let chanmon_cfgs = create_chanmon_cfgs(4); + let node_cfgs = create_node_cfgs(4, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(4, &node_cfgs, &[None, None, None, None]); + let nodes = create_network(4, &node_cfgs, &node_chanmgrs); + + let temp_chan_id = exchange_open_accept_chan(&nodes[0], &nodes[1], 1_000_000, 0); + + let (_, tx, funding_output) = + create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 1_000_000, 42); + + // Now that we have a funding outpoint, create a dummy `ChannelMonitor` and insert it into + // nodes[0]'s ChainMonitor so that the initial `ChannelMonitor` write fails. + let dummy_chan_id = create_chan_between_nodes(&nodes[2], &nodes[3]).3; + let dummy_monitor = get_monitor!(nodes[2], dummy_chan_id).clone(); + nodes[0].chain_monitor.chain_monitor.watch_channel(funding_output, dummy_monitor).unwrap(); + + nodes[0].node.funding_transaction_generated(&temp_chan_id, &nodes[1].node.get_our_node_id(), tx.clone()).unwrap(); + + let mut funding_created_msg = get_event_msg!(nodes[0], MessageSendEvent::SendFundingCreated, nodes[1].node.get_our_node_id()); + nodes[1].node.handle_funding_created(&nodes[0].node.get_our_node_id(), &funding_created_msg); + let funding_signed_msg = get_event_msg!(nodes[1], MessageSendEvent::SendFundingSigned, nodes[0].node.get_our_node_id()); + check_added_monitors!(nodes[1], 1); + expect_channel_pending_event(&nodes[1], &nodes[0].node.get_our_node_id()); + + nodes[0].node.handle_funding_signed(&nodes[1].node.get_our_node_id(), &funding_signed_msg); + // At this point, the channel should be closed, after having generated one monitor write (the + // watch_channel call which failed), but zero monitor updates. + check_added_monitors!(nodes[0], 1); + get_err_msg(&nodes[0], &nodes[1].node.get_our_node_id()); + let err_reason = ClosureReason::ProcessingError { err: "Channel funding outpoint was a duplicate".to_owned() }; + check_closed_events(&nodes[0], &[ExpectedCloseEvent::from_id_reason(funding_signed_msg.channel_id, true, err_reason)]); +} + #[test] fn test_duplicate_funding_err_in_funding() { // Test that if we have a live channel with one peer, then another peer comes along and tries