From 6ab56a40bebe88498dbbc42812ecc493d9553ae3 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 30 Nov 2023 00:36:16 +0000 Subject: [PATCH 1/5] Move to `Funded` after `funding_signed` rather than on funding Previously, channels were stored in different maps in `PeerState` based on whether the funding had been set, keeping the keys across the maps consistent (pre-funding temporary_channel_ids vs funding-outpoint-based channel_ids). However, channels are now stored in a single `channel_by_id` map, making that point moot. Instead, here, we convert the `ChannelPhase` state transition boundary to "once we have a `ChannelMonitor`", which makes more sense now, and was actually the original proposed boundary. This also requires calling `signer_maybe_unblocked` on a pre-funded outbound channel, but that nicely also lets us limit the scope of `FundingCreated` message generation, which we do in the next commit. --- lightning/src/ln/channel.rs | 249 ++++++++++++++------------- lightning/src/ln/channelmanager.rs | 106 +++++++----- lightning/src/ln/functional_tests.rs | 4 +- 3 files changed, 193 insertions(+), 166 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 8d6144fc25f..50d6a5d57b4 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -793,7 +793,6 @@ pub(super) struct MonitorRestoreUpdates { pub(super) struct SignerResumeUpdates { pub commitment_update: Option, pub funding_signed: Option, - pub funding_created: Option, pub channel_ready: Option, } @@ -2942,99 +2941,6 @@ impl Channel where } // Message handlers: - - /// Handles a funding_signed message from the remote end. - /// If this call is successful, broadcast the funding transaction (and not before!) - pub fn funding_signed( - &mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L - ) -> Result::EcdsaSigner>, ChannelError> - where - L::Target: Logger - { - if !self.context.is_outbound() { - return Err(ChannelError::Close("Received funding_signed for an inbound channel?".to_owned())); - } - if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) { - return Err(ChannelError::Close("Received funding_signed in strange state!".to_owned())); - } - if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) || - self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER || - self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { - panic!("Should not have advanced channel commitment tx numbers prior to funding_created"); - } - - let funding_script = self.context.get_funding_redeemscript(); - - let counterparty_keys = self.context.build_remote_transaction_keys(); - let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; - let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust(); - let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction(); - - log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}", - &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); - - let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); - let initial_commitment_tx = 
self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx; - { - let trusted_tx = initial_commitment_tx.trust(); - let initial_commitment_bitcoin_tx = trusted_tx.built_transaction(); - let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis); - // They sign our commitment transaction, allowing us to broadcast the tx if we wish. - if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) { - return Err(ChannelError::Close("Invalid funding_signed signature from peer".to_owned())); - } - } - - let holder_commitment_tx = HolderCommitmentTransaction::new( - initial_commitment_tx, - msg.signature, - Vec::new(), - &self.context.get_holder_pubkeys().funding_pubkey, - self.context.counterparty_funding_pubkey() - ); - - self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()) - .map_err(|_| ChannelError::Close("Failed to validate our commitment".to_owned()))?; - - - let funding_redeemscript = self.context.get_funding_redeemscript(); - let funding_txo = self.context.get_funding_txo().unwrap(); - let funding_txo_script = funding_redeemscript.to_v0_p2wsh(); - let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound()); - let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner()); - let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id); - monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters); - let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer, - shutdown_script, self.context.get_holder_selected_contest_delay(), - &self.context.destination_script, (funding_txo, funding_txo_script), - &self.context.channel_transaction_parameters, - funding_redeemscript.clone(), self.context.channel_value_satoshis, - obscure_factor, - holder_commitment_tx, best_block, self.context.counterparty_node_id); - channel_monitor.provide_initial_counterparty_commitment_tx( - counterparty_initial_bitcoin_tx.txid, Vec::new(), - self.context.cur_counterparty_commitment_transaction_number, - self.context.counterparty_cur_commitment_point.unwrap(), - counterparty_initial_commitment_tx.feerate_per_kw(), - counterparty_initial_commitment_tx.to_broadcaster_value_sat(), - counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger); - - assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have no had any monitor(s) yet to fail update! 
- if self.context.is_batch_funding() { - self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH); - } else { - self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()); - } - self.context.cur_holder_commitment_transaction_number -= 1; - self.context.cur_counterparty_commitment_transaction_number -= 1; - - log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id()); - - let need_channel_ready = self.check_get_channel_ready(0).is_some(); - self.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new()); - Ok(channel_monitor) - } - /// Updates the state of the channel to indicate that all channels in the batch have received /// funding_signed and persisted their monitors. /// The funding transaction is consequently allowed to be broadcast, and the channel can be @@ -4308,20 +4214,15 @@ impl Channel where let channel_ready = if funding_signed.is_some() { self.check_get_channel_ready(0) } else { None }; - let funding_created = if self.context.signer_pending_funding && self.context.is_outbound() { - self.context.get_funding_created_msg(logger) - } else { None }; - log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed, {} funding_created, and {} channel_ready", + log_trace!(logger, "Signer unblocked with {} commitment_update, {} funding_signed and {} channel_ready", if commitment_update.is_some() { "a" } else { "no" }, if funding_signed.is_some() { "a" } else { "no" }, - if funding_created.is_some() { "a" } else { "no" }, if channel_ready.is_some() { "a" } else { "no" }); SignerResumeUpdates { commitment_update, funding_signed, - funding_created, channel_ready, } } @@ -6445,8 +6346,8 @@ impl OutboundV1Channel where SP::Target: SignerProvider { /// Note that channel_id changes during this call! /// Do NOT broadcast the funding transaction until after a successful funding_signed call! /// If an Err is returned, it is a ChannelError::Close. - pub fn get_funding_created(mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L) - -> Result<(Channel, Option), (Self, ChannelError)> where L::Target: Logger { + pub fn get_funding_created(&mut self, funding_transaction: Transaction, funding_txo: OutPoint, is_batch_funding: bool, logger: &L) + -> Result, (Self, ChannelError)> where L::Target: Logger { if !self.context.is_outbound() { panic!("Tried to create outbound funding_created message on an inbound channel!"); } @@ -6489,11 +6390,7 @@ impl OutboundV1Channel where SP::Target: SignerProvider { } } - let channel = Channel { - context: self.context, - }; - - Ok((channel, funding_created)) + Ok(funding_created) } fn get_initial_channel_type(config: &UserConfig, their_features: &InitFeatures) -> ChannelTypeFeatures { @@ -6736,6 +6633,112 @@ impl OutboundV1Channel where SP::Target: SignerProvider { Ok(()) } + + /// Handles a funding_signed message from the remote end. + /// If this call is successful, broadcast the funding transaction (and not before!) 
+ pub fn funding_signed( + mut self, msg: &msgs::FundingSigned, best_block: BestBlock, signer_provider: &SP, logger: &L + ) -> Result<(Channel, ChannelMonitor<::EcdsaSigner>), (OutboundV1Channel, ChannelError)> + where + L::Target: Logger + { + if !self.context.is_outbound() { + return Err((self, ChannelError::Close("Received funding_signed for an inbound channel?".to_owned()))); + } + if !matches!(self.context.channel_state, ChannelState::FundingNegotiated) { + return Err((self, ChannelError::Close("Received funding_signed in strange state!".to_owned()))); + } + if self.context.commitment_secrets.get_min_seen_secret() != (1 << 48) || + self.context.cur_counterparty_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER || + self.context.cur_holder_commitment_transaction_number != INITIAL_COMMITMENT_NUMBER { + panic!("Should not have advanced channel commitment tx numbers prior to funding_created"); + } + + let funding_script = self.context.get_funding_redeemscript(); + + let counterparty_keys = self.context.build_remote_transaction_keys(); + let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; + let counterparty_trusted_tx = counterparty_initial_commitment_tx.trust(); + let counterparty_initial_bitcoin_tx = counterparty_trusted_tx.built_transaction(); + + log_trace!(logger, "Initial counterparty tx for channel {} is: txid {} tx {}", + &self.context.channel_id(), counterparty_initial_bitcoin_tx.txid, encode::serialize_hex(&counterparty_initial_bitcoin_tx.transaction)); + + let holder_signer = self.context.build_holder_transaction_keys(self.context.cur_holder_commitment_transaction_number); + let initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_holder_commitment_transaction_number, &holder_signer, true, false, logger).tx; + { + let trusted_tx = initial_commitment_tx.trust(); + let initial_commitment_bitcoin_tx = trusted_tx.built_transaction(); + let sighash = initial_commitment_bitcoin_tx.get_sighash_all(&funding_script, self.context.channel_value_satoshis); + // They sign our commitment transaction, allowing us to broadcast the tx if we wish. 
+ if let Err(_) = self.context.secp_ctx.verify_ecdsa(&sighash, &msg.signature, &self.context.get_counterparty_pubkeys().funding_pubkey) { + return Err((self, ChannelError::Close("Invalid funding_signed signature from peer".to_owned()))); + } + } + + let holder_commitment_tx = HolderCommitmentTransaction::new( + initial_commitment_tx, + msg.signature, + Vec::new(), + &self.context.get_holder_pubkeys().funding_pubkey, + self.context.counterparty_funding_pubkey() + ); + + let validated = + self.context.holder_signer.as_ref().validate_holder_commitment(&holder_commitment_tx, Vec::new()); + if validated.is_err() { + return Err((self, ChannelError::Close("Failed to validate our commitment".to_owned()))); + } + + let funding_redeemscript = self.context.get_funding_redeemscript(); + let funding_txo = self.context.get_funding_txo().unwrap(); + let funding_txo_script = funding_redeemscript.to_v0_p2wsh(); + let obscure_factor = get_commitment_transaction_number_obscure_factor(&self.context.get_holder_pubkeys().payment_point, &self.context.get_counterparty_pubkeys().payment_point, self.context.is_outbound()); + let shutdown_script = self.context.shutdown_scriptpubkey.clone().map(|script| script.into_inner()); + let mut monitor_signer = signer_provider.derive_channel_signer(self.context.channel_value_satoshis, self.context.channel_keys_id); + monitor_signer.provide_channel_parameters(&self.context.channel_transaction_parameters); + let channel_monitor = ChannelMonitor::new(self.context.secp_ctx.clone(), monitor_signer, + shutdown_script, self.context.get_holder_selected_contest_delay(), + &self.context.destination_script, (funding_txo, funding_txo_script), + &self.context.channel_transaction_parameters, + funding_redeemscript.clone(), self.context.channel_value_satoshis, + obscure_factor, + holder_commitment_tx, best_block, self.context.counterparty_node_id); + channel_monitor.provide_initial_counterparty_commitment_tx( + counterparty_initial_bitcoin_tx.txid, Vec::new(), + self.context.cur_counterparty_commitment_transaction_number, + self.context.counterparty_cur_commitment_point.unwrap(), + counterparty_initial_commitment_tx.feerate_per_kw(), + counterparty_initial_commitment_tx.to_broadcaster_value_sat(), + counterparty_initial_commitment_tx.to_countersignatory_value_sat(), logger); + + assert!(!self.context.channel_state.is_monitor_update_in_progress()); // We have no had any monitor(s) yet to fail update! + if self.context.is_batch_funding() { + self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::WAITING_FOR_BATCH); + } else { + self.context.channel_state = ChannelState::AwaitingChannelReady(AwaitingChannelReadyFlags::new()); + } + self.context.cur_holder_commitment_transaction_number -= 1; + self.context.cur_counterparty_commitment_transaction_number -= 1; + + log_info!(logger, "Received funding_signed from peer for channel {}", &self.context.channel_id()); + + let mut channel = Channel { context: self.context }; + + let need_channel_ready = channel.check_get_channel_ready(0).is_some(); + channel.monitor_updating_paused(false, false, need_channel_ready, Vec::new(), Vec::new(), Vec::new()); + Ok((channel, channel_monitor)) + } + + /// Indicates that the signer may have some signatures for us, so we should retry if we're + /// blocked. 
+ #[allow(unused)] + pub fn signer_maybe_unblocked(&mut self, logger: &L) -> Option where L::Target: Logger { + if self.context.signer_pending_funding && self.context.is_outbound() { + log_trace!(logger, "Signer unblocked a funding_created"); + self.context.get_funding_created_msg(logger) + } else { None } + } } /// A not-yet-funded inbound (from counterparty) channel using V1 channel establishment. @@ -8364,11 +8367,12 @@ mod tests { value: 10000000, script_pubkey: output_script.clone(), }]}; let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 }; - let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap(); + let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap(); let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap(); // Node B --> Node A: funding signed - let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap(); + let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger); + let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); }; // Put some inbound and outbound HTLCs in A's channel. let htlc_amount_msat = 11_092_000; // put an amount below A's effective dust limit but above B's. @@ -8492,11 +8496,12 @@ mod tests { value: 10000000, script_pubkey: output_script.clone(), }]}; let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 }; - let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap(); + let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap(); let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap(); // Node B --> Node A: funding signed - let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap(); + let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger); + let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); }; // Now disconnect the two nodes and check that the commitment point in // Node B's channel_reestablish message is sane. 
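
The test hunks above and below exercise the new calling convention introduced in this commit: `get_funding_created` now borrows the still-unfunded channel and returns only the optional `funding_created` message, while `funding_signed` moves from `Channel` to `OutboundV1Channel`, consumes the channel, and either yields the newly `Funded` channel together with its `ChannelMonitor` or hands the unfunded channel back alongside the error. A minimal, self-contained sketch of that ownership pattern, using toy types rather than the real LDK API, might look like this:

```rust
// Hypothetical, simplified model of the ownership pattern above -- these are
// toy types, not the real LDK `ChannelPhase`/`OutboundV1Channel` API.
use std::collections::HashMap;

struct UnfundedOutbound { channel_id: u64 }
#[allow(dead_code)]
struct FundedChannel { channel_id: u64 }
struct Monitor;
#[allow(dead_code)]
struct CloseError(String);

impl UnfundedOutbound {
    // Consumes the unfunded channel. On success the caller gets the funded
    // channel plus its monitor; on failure the channel is handed back so the
    // caller decides how to clean up (mirroring the `Err((self, ...))` shape).
    fn funding_signed(self, sig_ok: bool) -> Result<(FundedChannel, Monitor), (UnfundedOutbound, CloseError)> {
        if !sig_ok {
            return Err((self, CloseError("Invalid funding_signed signature from peer".to_owned())));
        }
        Ok((FundedChannel { channel_id: self.channel_id }, Monitor))
    }
}

enum ChannelPhase {
    UnfundedOutboundV1(UnfundedOutbound),
    Funded(FundedChannel),
}

fn handle_funding_signed(map: &mut HashMap<u64, ChannelPhase>, id: u64, sig_ok: bool) {
    // Remove the entry, attempt the phase transition, and re-insert whichever
    // phase results -- loosely mirroring the remove/re-insert done in `channelmanager.rs`.
    match map.remove(&id) {
        Some(ChannelPhase::UnfundedOutboundV1(chan)) => match chan.funding_signed(sig_ok) {
            Ok((chan, _monitor)) => { map.insert(id, ChannelPhase::Funded(chan)); },
            Err((_chan, _err)) => { /* channel stays removed; surface the error to the peer */ },
        },
        Some(other) => { map.insert(id, other); /* wrong phase: put it back untouched */ },
        None => { /* unknown channel */ },
    }
}

fn main() {
    let mut map = HashMap::new();
    map.insert(42, ChannelPhase::UnfundedOutboundV1(UnfundedOutbound { channel_id: 42 }));
    handle_funding_signed(&mut map, 42, true);
    assert!(matches!(map.get(&42), Some(ChannelPhase::Funded(_))));
}
```

Returning `self` in the error path is what lets the caller clean up the map entry and emit the appropriate close events itself, rather than relying on an `unreachable!()` branch or a second lookup.
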
@@ -8680,11 +8685,12 @@ mod tests { value: 10000000, script_pubkey: output_script.clone(), }]}; let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 }; - let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap(); + let funding_created_msg = node_a_chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap(); let (_, funding_signed_msg, _) = node_b_chan.funding_created(&funding_created_msg.unwrap(), best_block, &&keys_provider, &&logger).map_err(|_| ()).unwrap(); // Node B --> Node A: funding signed - let _ = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger).unwrap(); + let res = node_a_chan.funding_signed(&funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger); + let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); }; // Make sure that receiving a channel update will update the Channel as expected. let update = ChannelUpdate { @@ -9850,11 +9856,8 @@ mod tests { }, ]}; let funding_outpoint = OutPoint{ txid: tx.txid(), index: 0 }; - let (mut node_a_chan, funding_created_msg) = node_a_chan.get_funding_created( - tx.clone(), - funding_outpoint, - true, - &&logger, + let funding_created_msg = node_a_chan.get_funding_created( + tx.clone(), funding_outpoint, true, &&logger, ).map_err(|_| ()).unwrap(); let (mut node_b_chan, funding_signed_msg, _) = node_b_chan.funding_created( &funding_created_msg.unwrap(), @@ -9872,12 +9875,10 @@ mod tests { // Receive funding_signed, but the channel will be configured to hold sending channel_ready and // broadcasting the funding transaction until the batch is ready. - let _ = node_a_chan.funding_signed( - &funding_signed_msg.unwrap(), - best_block, - &&keys_provider, - &&logger, - ).unwrap(); + let res = node_a_chan.funding_signed( + &funding_signed_msg.unwrap(), best_block, &&keys_provider, &&logger, + ); + let (mut node_a_chan, _) = if let Ok(res) = res { res } else { panic!(); }; let node_a_updates = node_a_chan.monitor_updating_restored( &&logger, &&keys_provider, diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index ff1bd164a2f..37a9a886acc 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -3730,7 +3730,7 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; let (chan, msg_opt) = match peer_state.channel_by_id.remove(temporary_channel_id) { - Some(ChannelPhase::UnfundedOutboundV1(chan)) => { + Some(ChannelPhase::UnfundedOutboundV1(mut chan)) => { let funding_txo = find_funding_output(&chan, &funding_transaction)?; let logger = WithChannelContext::from(&self.logger, &chan.context); @@ -3743,7 +3743,7 @@ where (chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, user_id, shutdown_res, None, channel_capacity)) } else { unreachable!(); }); match funding_res { - Ok((chan, funding_msg)) => (chan, funding_msg), + Ok(funding_msg) => (chan, funding_msg), Err((chan, err)) => { mem::drop(peer_state_lock); mem::drop(per_peer_state); @@ -3783,7 +3783,7 @@ where if id_to_peer.insert(chan.context.channel_id(), chan.context.get_counterparty_node_id()).is_some() { panic!("id_to_peer map already contained funding txid, which shouldn't be possible"); } - e.insert(ChannelPhase::Funded(chan)); + e.insert(ChannelPhase::UnfundedOutboundV1(chan)); } } Ok(()) @@ -5429,7 +5429,7 @@ where // We got a temporary failure updating 
monitor, but will claim the // HTLC when the monitor updating is restored (or on chain). let logger = WithContext::from(&self.logger, None, Some(prev_hop_chan_id)); - log_error!(self.logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err); + log_error!(logger, "Temporary failure claiming HTLC, treating as success: {}", err.err.err); } else { errs.push((pk, err)); } } } @@ -6310,22 +6310,43 @@ where let mut peer_state_lock = peer_state_mutex.lock().unwrap(); let peer_state = &mut *peer_state_lock; match peer_state.channel_by_id.entry(msg.channel_id) { - hash_map::Entry::Occupied(mut chan_phase_entry) => { - match chan_phase_entry.get_mut() { - ChannelPhase::Funded(ref mut chan) => { - let logger = WithChannelContext::from(&self.logger, &chan.context); - let monitor = try_chan_phase_entry!(self, - chan.funding_signed(&msg, best_block, &self.signer_provider, &&logger), chan_phase_entry); - if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) { - handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR); - Ok(()) - } else { - try_chan_phase_entry!(self, Err(ChannelError::Close("Channel funding outpoint was a duplicate".to_owned())), chan_phase_entry) + hash_map::Entry::Occupied(chan_phase_entry) => { + if matches!(chan_phase_entry.get(), ChannelPhase::UnfundedOutboundV1(_)) { + let chan = if let ChannelPhase::UnfundedOutboundV1(chan) = chan_phase_entry.remove() { chan } else { unreachable!() }; + let logger = WithContext::from( + &self.logger, + Some(chan.context.get_counterparty_node_id()), + Some(chan.context.channel_id()) + ); + let res = + chan.funding_signed(&msg, best_block, &self.signer_provider, &&logger); + match res { + Ok((chan, monitor)) => { + if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) { + // We really should be able to insert here without doing a second + // lookup, but sadly rust stdlib doesn't currently allow keeping + // the original Entry around with the value removed. + let mut chan = peer_state.channel_by_id.entry(msg.channel_id).or_insert(ChannelPhase::Funded(chan)); + if let ChannelPhase::Funded(ref mut chan) = &mut chan { + handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan, INITIAL_MONITOR); + } else { unreachable!(); } + Ok(()) + } else { + let e = ChannelError::Close("Channel funding outpoint was a duplicate".to_owned()); + return Err(convert_chan_phase_err!(self, e, &mut ChannelPhase::Funded(chan), &msg.channel_id).1); + } + }, + Err((chan, e)) => { + debug_assert!(matches!(e, ChannelError::Close(_)), + "We don't have a channel anymore, so the error better have expected close"); + // We've already removed this outbound channel from the map in + // `PeerState` above so at this point we just need to clean up any + // lingering entries concerning this channel as it is safe to do so. 
+ return Err(convert_chan_phase_err!(self, e, &mut ChannelPhase::UnfundedOutboundV1(chan), &msg.channel_id).1); } - }, - _ => { - return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)); - }, + } + } else { + return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)); } }, hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close("Failed to find corresponding channel".to_owned(), msg.channel_id)) @@ -7251,29 +7272,34 @@ where let unblock_chan = |phase: &mut ChannelPhase, pending_msg_events: &mut Vec| { let node_id = phase.context().get_counterparty_node_id(); - if let ChannelPhase::Funded(chan) = phase { - let msgs = chan.signer_maybe_unblocked(&self.logger); - if let Some(updates) = msgs.commitment_update { - pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { - node_id, - updates, - }); - } - if let Some(msg) = msgs.funding_signed { - pending_msg_events.push(events::MessageSendEvent::SendFundingSigned { - node_id, - msg, - }); - } - if let Some(msg) = msgs.funding_created { - pending_msg_events.push(events::MessageSendEvent::SendFundingCreated { - node_id, - msg, - }); + match phase { + ChannelPhase::Funded(chan) => { + let msgs = chan.signer_maybe_unblocked(&self.logger); + if let Some(updates) = msgs.commitment_update { + pending_msg_events.push(events::MessageSendEvent::UpdateHTLCs { + node_id, + updates, + }); + } + if let Some(msg) = msgs.funding_signed { + pending_msg_events.push(events::MessageSendEvent::SendFundingSigned { + node_id, + msg, + }); + } + if let Some(msg) = msgs.channel_ready { + send_channel_ready!(self, pending_msg_events, chan, msg); + } } - if let Some(msg) = msgs.channel_ready { - send_channel_ready!(self, pending_msg_events, chan, msg); + ChannelPhase::UnfundedOutboundV1(chan) => { + if let Some(msg) = chan.signer_maybe_unblocked(&self.logger) { + pending_msg_events.push(events::MessageSendEvent::SendFundingCreated { + node_id, + msg, + }); + } } + ChannelPhase::UnfundedInboundV1(_) => {}, } }; diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index a41cf306880..fee3a3b3994 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -9059,7 +9059,7 @@ fn test_duplicate_chan_id() { nodes[0].node.handle_accept_channel(&nodes[1].node.get_our_node_id(), &get_event_msg!(nodes[1], MessageSendEvent::SendAcceptChannel, nodes[0].node.get_our_node_id())); create_funding_transaction(&nodes[0], &nodes[1].node.get_our_node_id(), 100000, 42); // Get and check the FundingGenerationReady event - let (_, funding_created) = { + let funding_created = { let per_peer_state = nodes[0].node.per_peer_state.read().unwrap(); let mut a_peer_state = per_peer_state.get(&nodes[1].node.get_our_node_id()).unwrap().lock().unwrap(); // Once we call `get_funding_created` the channel has a duplicate channel_id as @@ -9067,7 +9067,7 @@ fn test_duplicate_chan_id() { // try to create another channel. Instead, we drop the channel entirely here (leaving the // channelmanager in a possibly nonsense state instead). 
match a_peer_state.channel_by_id.remove(&open_chan_2_msg.temporary_channel_id).unwrap() { - ChannelPhase::UnfundedOutboundV1(chan) => { + ChannelPhase::UnfundedOutboundV1(mut chan) => { let logger = test_utils::TestLogger::new(); chan.get_funding_created(tx.clone(), funding_outpoint, false, &&logger).map_err(|_| ()).unwrap() }, From 8e519b56d6c4c9fe397bb73dacce059d5db7703a Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Thu, 30 Nov 2023 00:48:37 +0000 Subject: [PATCH 2/5] Limit the scope of `get_funding_created_msg` to outbound channels Since we no longer use `ChannelContext::get_funding_created_msg`, it can be moved back into `UnfundedOutboundV1` channels only, where it realistically belongs. --- lightning/src/ln/channel.rs | 68 ++++++++++++++++++------------------- 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 50d6a5d57b4..70eb69d0236 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -2406,38 +2406,6 @@ impl ChannelContext where SP::Target: SignerProvider { } } - /// Only allowed after [`Self::channel_transaction_parameters`] is set. - fn get_funding_created_msg(&mut self, logger: &L) -> Option where L::Target: Logger { - let counterparty_keys = self.build_remote_transaction_keys(); - let counterparty_initial_commitment_tx = self.build_commitment_transaction(self.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; - let signature = match &self.holder_signer { - // TODO (taproot|arik): move match into calling method for Taproot - ChannelSignerType::Ecdsa(ecdsa) => { - ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.secp_ctx) - .map(|(sig, _)| sig).ok()? - }, - // TODO (taproot|arik) - #[cfg(taproot)] - _ => todo!() - }; - - if self.signer_pending_funding { - log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding"); - self.signer_pending_funding = false; - } - - Some(msgs::FundingCreated { - temporary_channel_id: self.temporary_channel_id.unwrap(), - funding_txid: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid, - funding_output_index: self.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index, - signature, - #[cfg(taproot)] - partial_signature_with_nonce: None, - #[cfg(taproot)] - next_local_nonce: None, - }) - } - /// Only allowed after [`Self::channel_transaction_parameters`] is set. fn get_funding_signed_msg(&mut self, logger: &L) -> (CommitmentTransaction, Option) where L::Target: Logger { let counterparty_keys = self.build_remote_transaction_keys(); @@ -6339,6 +6307,38 @@ impl OutboundV1Channel where SP::Target: SignerProvider { }) } + /// Only allowed after [`ChannelContext::channel_transaction_parameters`] is set. + fn get_funding_created_msg(&mut self, logger: &L) -> Option where L::Target: Logger { + let counterparty_keys = self.context.build_remote_transaction_keys(); + let counterparty_initial_commitment_tx = self.context.build_commitment_transaction(self.context.cur_counterparty_commitment_transaction_number, &counterparty_keys, false, false, logger).tx; + let signature = match &self.context.holder_signer { + // TODO (taproot|arik): move match into calling method for Taproot + ChannelSignerType::Ecdsa(ecdsa) => { + ecdsa.sign_counterparty_commitment(&counterparty_initial_commitment_tx, Vec::new(), Vec::new(), &self.context.secp_ctx) + .map(|(sig, _)| sig).ok()? 
+ }, + // TODO (taproot|arik) + #[cfg(taproot)] + _ => todo!() + }; + + if self.context.signer_pending_funding { + log_trace!(logger, "Counterparty commitment signature ready for funding_created message: clearing signer_pending_funding"); + self.context.signer_pending_funding = false; + } + + Some(msgs::FundingCreated { + temporary_channel_id: self.context.temporary_channel_id.unwrap(), + funding_txid: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().txid, + funding_output_index: self.context.channel_transaction_parameters.funding_outpoint.as_ref().unwrap().index, + signature, + #[cfg(taproot)] + partial_signature_with_nonce: None, + #[cfg(taproot)] + next_local_nonce: None, + }) + } + /// Updates channel state with knowledge of the funding transaction's txid/index, and generates /// a funding_created message for the remote peer. /// Panics if called at some time other than immediately after initial handshake, if called twice, @@ -6382,7 +6382,7 @@ impl OutboundV1Channel where SP::Target: SignerProvider { self.context.funding_transaction = Some(funding_transaction); self.context.is_batch_funding = Some(()).filter(|_| is_batch_funding); - let funding_created = self.context.get_funding_created_msg(logger); + let funding_created = self.get_funding_created_msg(logger); if funding_created.is_none() { if !self.context.signer_pending_funding { log_trace!(logger, "funding_created awaiting signer; setting signer_pending_funding"); @@ -6736,7 +6736,7 @@ impl OutboundV1Channel where SP::Target: SignerProvider { pub fn signer_maybe_unblocked(&mut self, logger: &L) -> Option where L::Target: Logger { if self.context.signer_pending_funding && self.context.is_outbound() { log_trace!(logger, "Signer unblocked a funding_created"); - self.context.get_funding_created_msg(logger) + self.get_funding_created_msg(logger) } else { None } } } From 1667b4d20072dad58bc7252966221dfaa18f38b8 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 29 Nov 2023 05:58:52 +0000 Subject: [PATCH 3/5] Drop unreachable shutdown code in `Channel::get_shutdown` `Channel` is only a thing for funded channels. Thus, checking if a channel has not yet been funded is dead code and can simply be elided. --- lightning/src/ln/channel.rs | 32 ++++-------------------------- lightning/src/ln/channelmanager.rs | 4 ++-- 2 files changed, 6 insertions(+), 30 deletions(-) diff --git a/lightning/src/ln/channel.rs b/lightning/src/ln/channel.rs index 70eb69d0236..582f67878e6 100644 --- a/lightning/src/ln/channel.rs +++ b/lightning/src/ln/channel.rs @@ -5972,12 +5972,9 @@ impl Channel where /// Begins the shutdown process, getting a message for the remote peer and returning all /// holding cell HTLCs for payment failure. - /// - /// May jump to the channel being fully shutdown (see [`Self::is_shutdown`]) in which case no - /// [`ChannelMonitorUpdate`] will be returned). 
pub fn get_shutdown(&mut self, signer_provider: &SP, their_features: &InitFeatures, target_feerate_sats_per_kw: Option, override_shutdown_script: Option) - -> Result<(msgs::Shutdown, Option, Vec<(HTLCSource, PaymentHash)>, Option), APIError> + -> Result<(msgs::Shutdown, Option, Vec<(HTLCSource, PaymentHash)>), APIError> { for htlc in self.context.pending_outbound_htlcs.iter() { if let OutboundHTLCState::LocalAnnounced(_) = htlc.state { @@ -5998,16 +5995,9 @@ impl Channel where return Err(APIError::ChannelUnavailable{err: "Cannot begin shutdown while peer is disconnected or we're waiting on a monitor update, maybe force-close instead?".to_owned()}); } - // If we haven't funded the channel yet, we don't need to bother ensuring the shutdown - // script is set, we just force-close and call it a day. - let mut chan_closed = false; - if self.context.channel_state.is_pre_funded_state() { - chan_closed = true; - } - let update_shutdown_script = match self.context.shutdown_scriptpubkey { Some(_) => false, - None if !chan_closed => { + None => { // use override shutdown script if provided let shutdown_scriptpubkey = match override_shutdown_script { Some(script) => script, @@ -6025,25 +6015,11 @@ impl Channel where self.context.shutdown_scriptpubkey = Some(shutdown_scriptpubkey); true }, - None => false, }; // From here on out, we may not fail! self.context.target_closing_feerate_sats_per_kw = target_feerate_sats_per_kw; - let shutdown_result = if self.context.channel_state.is_pre_funded_state() { - let shutdown_result = ShutdownResult { - monitor_update: None, - dropped_outbound_htlcs: Vec::new(), - unbroadcasted_batch_funding_txid: self.context.unbroadcasted_batch_funding_txid(), - channel_id: self.context.channel_id, - counterparty_node_id: self.context.counterparty_node_id, - }; - self.context.channel_state = ChannelState::ShutdownComplete; - Some(shutdown_result) - } else { - self.context.channel_state.set_local_shutdown_sent(); - None - }; + self.context.channel_state.set_local_shutdown_sent(); self.context.update_time_counter += 1; let monitor_update = if update_shutdown_script { @@ -6079,7 +6055,7 @@ impl Channel where debug_assert!(!self.is_shutdown() || monitor_update.is_none(), "we can't both complete shutdown and return a monitor update"); - Ok((shutdown, monitor_update, dropped_outbound_htlcs, shutdown_result)) + Ok((shutdown, monitor_update, dropped_outbound_htlcs)) } pub fn inflight_htlc_sources(&self) -> impl Iterator { diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index 37a9a886acc..b1afeb1e9fc 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -2730,10 +2730,10 @@ where if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() { let funding_txo_opt = chan.context.get_funding_txo(); let their_features = &peer_state.latest_features; - let (shutdown_msg, mut monitor_update_opt, htlcs, local_shutdown_result) = + let (shutdown_msg, mut monitor_update_opt, htlcs) = chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?; failed_htlcs = htlcs; - shutdown_result = local_shutdown_result; + shutdown_result = None; debug_assert_eq!(shutdown_result.is_some(), chan.is_shutdown()); // We can send the `shutdown` message before updating the `ChannelMonitor` From 3a2690c8aac91e46c55060c84b41c692e18314ca Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 29 Nov 2023 18:11:30 +0000 Subject: [PATCH 4/5] Move pre-funded-channel immediate shutdown logic 
to the right place Because a `Funded` `Channel` cannot possibly be pre-funding, the logic in `ChannelManager::close_channel_internal` to handle pre-funding channels is in the wrong place. Rather than being handled inside the `Funded` branch, it should be in an `else` following it, handling either of the two `ChannelPhases` outside of `Funded`. Sadly, because of a previous control flow management `loop {}`, the existing code will infinite loop, which is fixed here. --- lightning/src/ln/channelmanager.rs | 26 ++++++++------------------ lightning/src/ln/shutdown_tests.rs | 15 +++++++++++++++ 2 files changed, 23 insertions(+), 18 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index b1afeb1e9fc..a74dbbfe4ce 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -2714,9 +2714,10 @@ where fn close_channel_internal(&self, channel_id: &ChannelId, counterparty_node_id: &PublicKey, target_feerate_sats_per_1000_weight: Option, override_shutdown_script: Option) -> Result<(), APIError> { let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self); - let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>; - let shutdown_result; - loop { + let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)> = Vec::new(); + let mut shutdown_result = None; + + { let per_peer_state = self.per_peer_state.read().unwrap(); let peer_state_mutex = per_peer_state.get(counterparty_node_id) @@ -2733,8 +2734,6 @@ where let (shutdown_msg, mut monitor_update_opt, htlcs) = chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?; failed_htlcs = htlcs; - shutdown_result = None; - debug_assert_eq!(shutdown_result.is_some(), chan.is_shutdown()); // We can send the `shutdown` message before updating the `ChannelMonitor` // here as we don't need the monitor update to complete until we send a @@ -2751,20 +2750,11 @@ where if let Some(monitor_update) = monitor_update_opt.take() { handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update, peer_state_lock, peer_state, per_peer_state, chan); - break; } - - if chan.is_shutdown() { - if let ChannelPhase::Funded(chan) = remove_channel_phase!(self, chan_phase_entry) { - if let Ok(channel_update) = self.get_channel_update_for_broadcast(&chan) { - peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate { - msg: channel_update - }); - } - self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed); - } - } - break; + } else { + self.issue_channel_close_events(chan_phase_entry.get().context(), ClosureReason::HolderForceClosed); + let mut chan_phase = remove_channel_phase!(self, chan_phase_entry); + shutdown_result = Some(chan_phase.context_mut().force_shutdown(false)); } }, hash_map::Entry::Vacant(_) => { diff --git a/lightning/src/ln/shutdown_tests.rs b/lightning/src/ln/shutdown_tests.rs index 308211d0b37..bdd32e90d0a 100644 --- a/lightning/src/ln/shutdown_tests.rs +++ b/lightning/src/ln/shutdown_tests.rs @@ -277,6 +277,21 @@ fn shutdown_on_unfunded_channel() { check_closed_event!(nodes[0], 1, ClosureReason::CounterpartyCoopClosedUnfundedChannel, [nodes[1].node.get_our_node_id()], 1_000_000); } +#[test] +fn close_on_unfunded_channel() { + // Test the user asking us to close prior to funding generation + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[None, None]); + let 
nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let chan_id = nodes[0].node.create_channel(nodes[1].node.get_our_node_id(), 1_000_000, 100_000, 0, None, None).unwrap(); + let _open_chan = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id()); + + nodes[0].node.close_channel(&chan_id, &nodes[1].node.get_our_node_id()).unwrap(); + check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 1_000_000); +} + #[test] fn expect_channel_shutdown_state_with_force_closure() { // Test sending a shutdown prior to channel_ready after funding generation From 5ba4c079bb242f5a0279a36d24ba87e3d8d1d528 Mon Sep 17 00:00:00 2001 From: Matt Corallo Date: Wed, 29 Nov 2023 06:02:46 +0000 Subject: [PATCH 5/5] Immediately error in `close_channel_internal` if there is no chan Previously, unfunded channels would be stored outside of `PeerState::channel_by_id`, and thus if there is no channel when we look in `PeerState::channel_by_id`, `close_channel_internal` called `force_close_channel_with_peer` to hunt for unfunded channels. However, that is no longer the case, so the call is redundant, and we can simply return an error instead. --- lightning/src/ln/channelmanager.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/lightning/src/ln/channelmanager.rs b/lightning/src/ln/channelmanager.rs index a74dbbfe4ce..7c6ee1e429c 100644 --- a/lightning/src/ln/channelmanager.rs +++ b/lightning/src/ln/channelmanager.rs @@ -2758,13 +2758,12 @@ where } }, hash_map::Entry::Vacant(_) => { - // If we reach this point, it means that the channel_id either refers to an unfunded channel or - // it does not exist for this peer. Either way, we can attempt to force-close it. - // - // An appropriate error will be returned for non-existence of the channel if that's the case. - mem::drop(peer_state_lock); - mem::drop(per_peer_state); - return self.force_close_channel_with_peer(&channel_id, counterparty_node_id, None, false).map(|_| ()) + return Err(APIError::ChannelUnavailable { + err: format!( + "Channel with id {} not found for the passed counterparty node_id {}", + channel_id, counterparty_node_id, + ) + }); }, } }
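
The PATCH 4/5 commit message above notes that `close_channel_internal` used `loop { ... break; }` purely as an early-exit scaffold, so once a branch stopped ending in `break` the function would spin forever; the fix swaps the `loop` for a plain block. A standalone illustration of why that pattern is fragile (hypothetical helpers, not LDK code):

```rust
// Standalone illustration of the control-flow hazard described in the
// PATCH 4/5 commit message -- not LDK code, just the shape of the bug.
fn close_with_loop(have_monitor_update: bool) -> u32 {
    let mut iterations = 0;
    loop {
        iterations += 1;
        if iterations > 1_000 {
            break; // safety guard so this example terminates; the real bug spins forever
        }
        if have_monitor_update {
            break; // the only branch that remembered to `break`
        }
        // Any branch that falls through here restarts the loop instead of exiting.
    }
    iterations
}

fn close_with_block(have_monitor_update: bool) -> u32 {
    let mut iterations = 0;
    {
        iterations += 1;
        if have_monitor_update {
            // handle the monitor update; nothing to `break` out of
        }
        // Falling through simply leaves the block -- it cannot run twice.
    }
    iterations
}

fn main() {
    assert_eq!(close_with_loop(true), 1);
    assert_eq!(close_with_loop(false), 1_001); // forgot-to-break path loops until the guard
    assert_eq!(close_with_block(false), 1);
    assert_eq!(close_with_block(true), 1);
}
```

With a plain block there is nothing to restart, so a branch that forgets to exit early can at worst fall through to the code after the block.
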