@@ -2047,8 +2047,12 @@ macro_rules! handle_monitor_update_completion {
 				));
 				if let Some(channel_state) = channel_state {
 					channel_state.2 = true;
+				} else {
+					debug_assert!(false, "Missing channel batch state for channel which completed initial monitor update");
 				}
 				batch_completed = batch_state.iter().all(|(_, _, completed)| *completed);
+			} else {
+				debug_assert!(false, "Missing batch state for channel which completed initial monitor update");
 			}
 
 			// When all channels in a batched funding transaction have become ready, it is not necessary
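The batch state consulted above is, per `funding_batch_states`, a per-txid list of `(channel_id, counterparty_node_id, completed)` entries. A minimal standalone sketch of the lookup and completion check, using integer IDs as hypothetical stand-ins for the real `ChannelId`/`PublicKey` types:

```rust
fn main() {
    // Each entry: (channel id, counterparty id, initial-monitor-update completed).
    // Integer IDs are hypothetical stand-ins for ChannelId/PublicKey.
    let mut batch_state: Vec<(u64, u64, bool)> = vec![(1, 10, true), (2, 11, false)];

    // Mark channel 2's initial monitor update as completed; in debug builds,
    // assert that the entry we expect to find actually exists.
    let channel_state = batch_state.iter_mut().find(|(id, _, _)| *id == 2);
    if let Some(channel_state) = channel_state {
        channel_state.2 = true;
    } else {
        debug_assert!(false, "Missing channel batch state");
    }

    // The batch is complete once every channel has completed its update.
    let batch_completed = batch_state.iter().all(|(_, _, completed)| *completed);
    assert!(batch_completed);
}
```

The `debug_assert!(false, ..)` arms only fire in debug builds; in release builds a missing entry is simply skipped.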
@@ -2707,7 +2711,8 @@ where
 			let mut funding_batch_states = self.funding_batch_states.lock().unwrap();
 			let affected_channels = funding_batch_states.remove(&txid).into_iter().flatten();
 			let per_peer_state = self.per_peer_state.read().unwrap();
-			for (channel_id, counterparty_node_id, _state) in affected_channels {
+			let mut has_uncompleted_channel = None;
+			for (channel_id, counterparty_node_id, state) in affected_channels {
 				if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
 					let mut peer_state = peer_state_mutex.lock().unwrap();
 					if let Some(mut chan) = peer_state.channel_by_id.remove(&channel_id) {
@@ -2716,7 +2721,12 @@ where
 						shutdown_results.push(chan.context_mut().force_shutdown(false));
 					}
 				}
+				has_uncompleted_channel = Some(has_uncompleted_channel.map_or(!state, |v| v || !state));
 			}
+			debug_assert!(
+				has_uncompleted_channel.unwrap_or(true),
+				"Closing a batch where all channels have completed initial monitor update",
+			);
 		}
 		for shutdown_result in shutdown_results.drain(..) {
 			self.finish_close_channel(shutdown_result);
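The `has_uncompleted_channel` accumulator stays `None` when the batch had no channels at all, and otherwise records whether at least one channel had not yet completed its initial monitor update, so the `debug_assert!` only fires when a batch is closed after every channel already completed. A standalone sketch, assuming `state == true` means "initial monitor update completed":

```rust
fn any_uncompleted(states: &[bool]) -> Option<bool> {
    let mut has_uncompleted_channel = None;
    for &state in states {
        // `state` is true when the channel completed its initial monitor update.
        has_uncompleted_channel =
            Some(has_uncompleted_channel.map_or(!state, |v: bool| v || !state));
    }
    has_uncompleted_channel
}

fn main() {
    assert_eq!(any_uncompleted(&[]), None); // empty batch: nothing to assert on
    assert_eq!(any_uncompleted(&[true, true]), Some(false));
    assert_eq!(any_uncompleted(&[true, false]), Some(true));
    // The debug_assert! in the patch fires only on Some(false): closing a
    // batch in which every channel already completed its update.
}
```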
@@ -3828,7 +3838,7 @@ where
 
 	/// Call this upon creation of a batch funding transaction for the given channels.
 	///
-	/// Return values are identical to `funding_transaction_generated`, respective to
+	/// Return values are identical to [`Self::funding_transaction_generated`], respective to
 	/// each individual channel and transaction output.
 	///
 	/// Do NOT broadcast the funding transaction yourself. This batch funding transcaction
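The "identical, respective to each individual channel" contract in this doc comment comes from folding per-channel results with `and_then` (visible in a later hunk), so the caller sees the first per-channel failure. A hedged sketch of that folding, with hypothetical stand-in types in place of the real API:

```rust
#[derive(Clone, Debug, PartialEq)]
struct ApiError(String);

// Stand-in for funding a single channel; fails for an unknown temporary id.
fn fund_one(temp_channel_id: u32) -> Result<(), ApiError> {
    if temp_channel_id == 0 {
        Err(ApiError("unknown temporary channel".to_owned()))
    } else {
        Ok(())
    }
}

// Fold the per-channel results so the first failure is what the caller sees.
fn fund_batch(temp_channel_ids: &[u32]) -> Result<(), ApiError> {
    let mut result: Result<(), ApiError> = Ok(());
    for &id in temp_channel_ids {
        result = result.and_then(|_| fund_one(id));
    }
    result
}

fn main() {
    assert_eq!(fund_batch(&[1, 2, 3]), Ok(()));
    assert!(fund_batch(&[1, 0, 3]).is_err());
}
```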
@@ -3867,7 +3877,6 @@ where
 			}
 		}
 
-
 		let txid = funding_transaction.txid();
 		let is_batch_funding = temporary_channels.len() > 1;
 		let mut funding_batch_states = if is_batch_funding {
@@ -3876,8 +3885,15 @@ where
 			None
 		};
 		let mut funding_batch_state = funding_batch_states.as_mut().and_then(|states| {
-			states.insert(txid, Vec::new());
-			states.get_mut(&txid)
+			if states.contains_key(&txid) {
+				result = result.clone().and(Err(APIError::APIMisuseError {
+					err: "Batch funding transaction with the same txid already exists".to_owned()
+				}));
+				None
+			} else {
+				states.insert(txid, Vec::new());
+				states.get_mut(&txid)
+			}
 		});
 		for (channel_idx, &(temporary_channel_id, counterparty_node_id)) in temporary_channels.iter().enumerate() {
 			result = result.and_then(|_| self.funding_transaction_generated_intern(
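A standalone sketch of the duplicate-txid guard added here, with `u64` as a hypothetical stand-in for `Txid` and `String` for `APIError`. Note that `result.clone().and(Err(..))` preserves an earlier error rather than overwriting it:

```rust
use std::collections::HashMap;

// Register a new batch, refusing a funding txid that is already tracked and
// folding the error into the per-call result (an existing Err is kept).
fn register_batch(
    states: &mut HashMap<u64, Vec<u32>>, txid: u64, result: &mut Result<(), String>,
) -> bool {
    if states.contains_key(&txid) {
        *result = result.clone().and(Err(
            "Batch funding transaction with the same txid already exists".to_owned(),
        ));
        false
    } else {
        states.insert(txid, Vec::new());
        true
    }
}

fn main() {
    let mut states = HashMap::new();
    let mut result: Result<(), String> = Ok(());
    assert!(register_batch(&mut states, 42, &mut result));
    assert!(!register_batch(&mut states, 42, &mut result));
    assert!(result.is_err());
}
```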
@@ -7995,35 +8011,24 @@ where
 			peer_state.channel_by_id.retain(|_, phase| {
 				let context = match phase {
 					ChannelPhase::Funded(chan) => {
-						if !chan.context.is_funding_broadcast() {
-							update_maps_on_chan_removal!(self, &chan.context);
-							self.issue_channel_close_events(&chan.context, ClosureReason::DisconnectedPeer);
-							// It is possible to have persisted the monitor upon funding_signed
-							// but not have broadcast the transaction, especially for batch funding.
-							// The monitor should be moved to the correct state.
-							failed_channels.push(chan.context.force_shutdown(false));
-							return false;
-						} else {
-							chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
+						if chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger).is_ok() {
 							// We only retain funded channels that are not shutdown.
-							if !chan.is_shutdown() {
-								return true;
-							}
+							return true;
 						}
-						&chan.context
+						&mut chan.context
 					},
 					// Unfunded channels will always be removed.
 					ChannelPhase::UnfundedOutboundV1(chan) => {
-						&chan.context
+						&mut chan.context
 					},
 					ChannelPhase::UnfundedInboundV1(chan) => {
-						&chan.context
+						&mut chan.context
 					},
 				};
 				// Clean up for removal.
 				update_maps_on_chan_removal!(self, &context);
 				self.issue_channel_close_events(&context, ClosureReason::DisconnectedPeer);
-				failed_channels.push((None, Vec::new(), None));
+				failed_channels.push(context.force_shutdown(false));
 				false
 			});
 			// Note that we don't bother generating any events for pre-accept channels -
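A standalone sketch of the simplified `retain` shape: a funded channel is kept only when pausing it succeeds, and every removed channel (funded-but-unpausable or unfunded) now flows through the same `force_shutdown` cleanup instead of pushing an empty placeholder tuple. The channel types below are hypothetical stand-ins:

```rust
use std::collections::HashMap;

// Hypothetical stand-in for the real channel phases.
enum ChannelPhase {
    Funded { pausable: bool },
    Unfunded,
}

fn main() {
    let mut channel_by_id: HashMap<u32, ChannelPhase> = HashMap::new();
    channel_by_id.insert(1, ChannelPhase::Funded { pausable: true });
    channel_by_id.insert(2, ChannelPhase::Funded { pausable: false });
    channel_by_id.insert(3, ChannelPhase::Unfunded);

    let mut failed_channels: Vec<u32> = Vec::new();
    channel_by_id.retain(|id, phase| {
        match phase {
            // Mirrors `remove_uncommitted_htlcs_and_mark_paused(..).is_ok()`:
            // a funded channel that pauses cleanly is the only thing retained.
            ChannelPhase::Funded { pausable } if *pausable => return true,
            // Everything else falls through to cleanup.
            _ => {}
        }
        // Mirrors `failed_channels.push(context.force_shutdown(false))`.
        failed_channels.push(*id);
        false
    });

    assert!(channel_by_id.contains_key(&1));
    assert!(!channel_by_id.contains_key(&2));
    assert_eq!(failed_channels.len(), 2);
}
```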
@@ -9262,7 +9267,10 @@ where
 					log_error!(args.logger, " The ChannelMonitor for channel {} is at counterparty commitment transaction number {} but the ChannelManager is at counterparty commitment transaction number {}.",
 						&channel.context.channel_id(), monitor.get_cur_counterparty_commitment_number(), channel.get_cur_counterparty_commitment_transaction_number());
 				}
-				let (monitor_update, mut new_failed_htlcs, _batch_funding_txid) = channel.context.force_shutdown(true);
+				let (monitor_update, mut new_failed_htlcs, batch_funding_txid) = channel.context.force_shutdown(true);
+				if batch_funding_txid.is_some() {
+					return Err(DecodeError::InvalidValue);
+				}
 				if let Some((counterparty_node_id, funding_txo, update)) = monitor_update {
 					close_background_events.push(BackgroundEvent::MonitorUpdateRegeneratedOnStartup {
 						counterparty_node_id, funding_txo, update
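A standalone sketch of the new deserialization guard: a channel force-closed during `ChannelManager` read that still carries a batch funding txid now fails the whole read with `DecodeError::InvalidValue` rather than silently discarding batch state. `Option<u64>` stands in for the real txid type:

```rust
// Hypothetical stand-in for lightning's DecodeError.
#[derive(Debug, PartialEq)]
enum DecodeError {
    InvalidValue,
}

// Reject the read if a force-closed channel still belongs to a funding batch.
fn check_shutdown(batch_funding_txid: Option<u64>) -> Result<(), DecodeError> {
    if batch_funding_txid.is_some() {
        return Err(DecodeError::InvalidValue);
    }
    Ok(())
}

fn main() {
    assert_eq!(check_shutdown(None), Ok(()));
    assert_eq!(check_shutdown(Some(7)), Err(DecodeError::InvalidValue));
}
```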