@@ -2548,7 +2548,7 @@ where
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
 		let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
-		let mut shutdown_result = None;
+		let shutdown_result;
 		loop {
 			let per_peer_state = self.per_peer_state.read().unwrap();
 
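Note: the binding changes from a `mut` Option reset to `None` up front to a deferred-initialized `let`. Rust's definite-initialization analysis accepts a binding declared without a value as long as every path that reads it assigns it first (here, from `get_shutdown` inside the loop). A minimal illustration of the pattern, not code from this PR:

	fn example() -> u32 {
		let shutdown_result; // no `mut`, no initializer
		loop {
			// ... work that may early-return without touching the binding ...
			shutdown_result = 42; // assigned exactly once before the loop exits
			break;
		}
		shutdown_result // readable: the compiler proved initialization on all paths
	}
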
@@ -2563,10 +2563,10 @@ where
 					if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
 						let funding_txo_opt = chan.context.get_funding_txo();
 						let their_features = &peer_state.latest_features;
-						let unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid();
-						let (shutdown_msg, mut monitor_update_opt, htlcs) =
+						let (shutdown_msg, mut monitor_update_opt, htlcs, local_shutdown_result) =
 							chan.get_shutdown(&self.signer_provider, their_features, target_feerate_sats_per_1000_weight, override_shutdown_script)?;
 						failed_htlcs = htlcs;
+						shutdown_result = local_shutdown_result;
 
 						// We can send the `shutdown` message before updating the `ChannelMonitor`
 						// here as we don't need the monitor update to complete until we send a
@@ -2594,7 +2594,6 @@ where
 							});
 						}
 						self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
-						shutdown_result = Some((None, Vec::new(), unbroadcasted_batch_funding_txid));
 					}
 				}
 				break;
@@ -2684,30 +2683,29 @@ where
 		self.close_channel_internal(channel_id, counterparty_node_id, target_feerate_sats_per_1000_weight, shutdown_script)
 	}
 
-	fn finish_close_channel(&self, shutdown_res: ShutdownResult) {
+	fn finish_close_channel(&self, mut shutdown_res: ShutdownResult) {
 		debug_assert_ne!(self.per_peer_state.held_by_thread(), LockHeldState::HeldByThread);
 		#[cfg(debug_assertions)]
 		for (_, peer) in self.per_peer_state.read().unwrap().iter() {
 			debug_assert_ne!(peer.held_by_thread(), LockHeldState::HeldByThread);
 		}
 
-		let (monitor_update_option, mut failed_htlcs, unbroadcasted_batch_funding_txid) = shutdown_res;
-		log_debug!(self.logger, "Finishing closure of channel with {} HTLCs to fail", failed_htlcs.len());
-		for htlc_source in failed_htlcs.drain(..) {
+		log_debug!(self.logger, "Finishing closure of channel with {} HTLCs to fail", shutdown_res.dropped_outbound_htlcs.len());
+		for htlc_source in shutdown_res.dropped_outbound_htlcs.drain(..) {
 			let (source, payment_hash, counterparty_node_id, channel_id) = htlc_source;
 			let reason = HTLCFailReason::from_failure_code(0x4000 | 8);
 			let receiver = HTLCDestination::NextHopChannel { node_id: Some(counterparty_node_id), channel_id };
 			self.fail_htlc_backwards_internal(&source, &payment_hash, &reason, receiver);
 		}
-		if let Some((_, funding_txo, monitor_update)) = monitor_update_option {
+		if let Some((_, funding_txo, monitor_update)) = shutdown_res.monitor_update {
 			// There isn't anything we can do if we get an update failure - we're already
 			// force-closing. The monitor update on the required in-memory copy should broadcast
 			// the latest local state, which is the best we can do anyway. Thus, it is safe to
 			// ignore the result here.
 			let _ = self.chain_monitor.update_channel(funding_txo, &monitor_update);
 		}
 		let mut shutdown_results = Vec::new();
-		if let Some(txid) = unbroadcasted_batch_funding_txid {
+		if let Some(txid) = shutdown_res.unbroadcasted_batch_funding_txid {
 			let mut funding_batch_states = self.funding_batch_states.lock().unwrap();
 			let affected_channels = funding_batch_states.remove(&txid).into_iter().flatten();
 			let per_peer_state = self.per_peer_state.read().unwrap();
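Note: `finish_close_channel` now takes `shutdown_res` by value as `mut` because `dropped_outbound_htlcs` is drained in place. The `ShutdownResult` definition itself is outside these hunks; judging by the field accesses and tuple patterns in this diff, it is presumably along these lines:

	// Presumed shape, inferred from the destructuring above; not shown in this diff.
	pub(crate) struct ShutdownResult {
		// (counterparty_node_id, funding_txo, update), per the patterns matched above
		pub(crate) monitor_update: Option<(PublicKey, OutPoint, ChannelMonitorUpdate)>,
		// (source, payment_hash, counterparty_node_id, channel_id) for each dropped HTLC
		pub(crate) dropped_outbound_htlcs: Vec<(HTLCSource, PaymentHash, PublicKey, ChannelId)>,
		pub(crate) unbroadcasted_batch_funding_txid: Option<Txid>,
	}
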
@@ -6242,22 +6240,19 @@ where
 	}
 
 	fn internal_closing_signed(&self, counterparty_node_id: &PublicKey, msg: &msgs::ClosingSigned) -> Result<(), MsgHandleErrInternal> {
-		let mut shutdown_result = None;
-		let unbroadcasted_batch_funding_txid;
 		let per_peer_state = self.per_peer_state.read().unwrap();
 		let peer_state_mutex = per_peer_state.get(counterparty_node_id)
 			.ok_or_else(|| {
 				debug_assert!(false);
 				MsgHandleErrInternal::send_err_msg_no_close(format!("Can't find a peer matching the passed counterparty node_id {}", counterparty_node_id), msg.channel_id)
 			})?;
-		let (tx, chan_option) = {
+		let (tx, chan_option, shutdown_result) = {
 			let mut peer_state_lock = peer_state_mutex.lock().unwrap();
 			let peer_state = &mut *peer_state_lock;
 			match peer_state.channel_by_id.entry(msg.channel_id.clone()) {
 				hash_map::Entry::Occupied(mut chan_phase_entry) => {
 					if let ChannelPhase::Funded(chan) = chan_phase_entry.get_mut() {
-						unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid();
-						let (closing_signed, tx) = try_chan_phase_entry!(self, chan.closing_signed(&self.fee_estimator, &msg), chan_phase_entry);
+						let (closing_signed, tx, shutdown_result) = try_chan_phase_entry!(self, chan.closing_signed(&self.fee_estimator, &msg), chan_phase_entry);
 						if let Some(msg) = closing_signed {
 							peer_state.pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
 								node_id: counterparty_node_id.clone(),
@@ -6270,8 +6265,8 @@ where
 							// also implies there are no pending HTLCs left on the channel, so we can
 							// fully delete it from tracking (the channel monitor is still around to
 							// watch for old state broadcasts)!
-							(tx, Some(remove_channel_phase!(self, chan_phase_entry)))
-						} else { (tx, None) }
+							(tx, Some(remove_channel_phase!(self, chan_phase_entry)), shutdown_result)
+						} else { (tx, None, shutdown_result) }
 					} else {
 						return try_chan_phase_entry!(self, Err(ChannelError::Close(
 							"Got a closing_signed message for an unfunded channel!".into())), chan_phase_entry);
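Note: the tuple returned by `chan.closing_signed(...)` grows a third element, so the channel itself now reports when the cooperative close completes instead of the manager reconstructing that state from `unbroadcasted_batch_funding_txid`. The corresponding channel.rs change is not in this diff; the new signature is presumably something like:

	// Presumed; the actual definition lives in channel.rs and is not shown here.
	pub fn closing_signed<F: Deref>(
		&mut self, fee_estimator: &LowerBoundedFeeEstimator<F>, msg: &msgs::ClosingSigned,
	) -> Result<(Option<msgs::ClosingSigned>, Option<Transaction>, Option<ShutdownResult>), ChannelError>
		where F::Target: FeeEstimator
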
@@ -6293,7 +6288,6 @@ where
 				});
 			}
 			self.issue_channel_close_events(&chan.context, ClosureReason::CooperativeClosure);
-			shutdown_result = Some((None, Vec::new(), unbroadcasted_batch_funding_txid));
 		}
 		mem::drop(per_peer_state);
 		if let Some(shutdown_result) = shutdown_result {
@@ -6972,8 +6966,7 @@ where
 	fn maybe_generate_initial_closing_signed(&self) -> bool {
 		let mut handle_errors: Vec<(PublicKey, Result<(), _>)> = Vec::new();
 		let mut has_update = false;
-		let mut shutdown_result = None;
-		let mut unbroadcasted_batch_funding_txid = None;
+		let mut shutdown_results = Vec::new();
 		{
 			let per_peer_state = self.per_peer_state.read().unwrap();
 
@@ -6984,15 +6977,17 @@ where
 				peer_state.channel_by_id.retain(|channel_id, phase| {
 					match phase {
 						ChannelPhase::Funded(chan) => {
-							unbroadcasted_batch_funding_txid = chan.context.unbroadcasted_batch_funding_txid();
 							match chan.maybe_propose_closing_signed(&self.fee_estimator, &self.logger) {
-								Ok((msg_opt, tx_opt)) => {
+								Ok((msg_opt, tx_opt, shutdown_result_opt)) => {
 									if let Some(msg) = msg_opt {
 										has_update = true;
 										pending_msg_events.push(events::MessageSendEvent::SendClosingSigned {
 											node_id: chan.context.get_counterparty_node_id(), msg,
 										});
 									}
+									if let Some(shutdown_result) = shutdown_result_opt {
+										shutdown_results.push(shutdown_result);
+									}
 									if let Some(tx) = tx_opt {
 										// We're done with this channel. We got a closing_signed and sent back
 										// a closing_signed with a closing transaction to broadcast.
@@ -7007,7 +7002,6 @@ where
 										log_info!(self.logger, "Broadcasting {}", log_tx!(tx));
 										self.tx_broadcaster.broadcast_transactions(&[&tx]);
 										update_maps_on_chan_removal!(self, &chan.context);
-										shutdown_result = Some((None, Vec::new(), unbroadcasted_batch_funding_txid));
 										false
 									} else { true }
 								},
@@ -7029,7 +7023,7 @@ where
 			let _ = handle_error!(self, err, counterparty_node_id);
 		}
 
-		if let Some(shutdown_result) = shutdown_result {
+		for shutdown_result in shutdown_results.drain(..) {
 			self.finish_close_channel(shutdown_result);
 		}
 
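Note: switching from a single `Option` to a `Vec` here also fixes a subtle issue in `maybe_generate_initial_closing_signed`: the old `shutdown_result` was overwritten on each iteration of the retain loop, so if several channels finished their cooperative close in one pass, only the last one reached `finish_close_channel`. Collecting into `shutdown_results` and draining it after the locks are released finishes every closed channel.
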
@@ -7048,7 +7042,7 @@ where
 				// Channel::force_shutdown tries to make us do) as we may still be in initialization,
 				// so we track the update internally and handle it when the user next calls
 				// timer_tick_occurred, guaranteeing we're running normally.
-				if let Some((counterparty_node_id, funding_txo, update)) = failure.0.take() {
+				if let Some((counterparty_node_id, funding_txo, update)) = failure.monitor_update.take() {
 					assert_eq!(update.updates.len(), 1);
 					if let ChannelMonitorUpdateStep::ChannelForceClosed { should_broadcast } = update.updates[0] {
 						assert!(should_broadcast);
@@ -9266,16 +9260,16 @@ where
 					log_error!(args.logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.",
 						&channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number());
 				}
-				let (monitor_update, mut new_failed_htlcs, batch_funding_txid) = channel.context.force_shutdown(true);
-				if batch_funding_txid.is_some() {
+				let mut shutdown_result = channel.context.force_shutdown(true);
+				if shutdown_result.unbroadcasted_batch_funding_txid.is_some() {
 					return Err(DecodeError::InvalidValue);
 				}
-				if let Some((counterparty_node_id, funding_txo, update)) = monitor_update {
+				if let Some((counterparty_node_id, funding_txo, update)) = shutdown_result.monitor_update {
 					close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
 						counterparty_node_id, funding_txo, update
 					});
 				}
-				failed_htlcs.append(&mut new_failed_htlcs);
+				failed_htlcs.append(&mut shutdown_result.dropped_outbound_htlcs);
 				channel_closures.push_back((events::Event::ChannelClosed {
 					channel_id: channel.context.channel_id(),
 					user_channel_id: channel.context.get_user_id(),
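Note: `ChannelContext::force_shutdown` now returns the struct directly, and the binding is `mut` so `dropped_outbound_htlcs` can be moved out via `append`. The presumed signature on the channel side (not shown in this diff):

	// Presumed; the actual definition lives in channel.rs.
	pub(crate) fn force_shutdown(&mut self, should_broadcast: bool) -> ShutdownResult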