@@ -1193,15 +1193,15 @@ where
/// `PersistenceNotifierGuard::notify_on_drop(..)` and pass the lock to it, to ensure the
/// Notifier the lock contains sends out a notification when the lock is released.
total_consistency_lock: RwLock<()>,
- /// Tracks the progress of channels going through batch v1 channel establishment by whether
- /// funding_signed was received and the monitor has been persisted.
+ /// Tracks the progress of channels going through batch funding by whether funding_signed was
+ /// received and the monitor has been persisted.
///
/// This information does not need to be persisted as funding nodes can forget
/// unfunded channels upon disconnection.
- v1_funding_batch_states: FairRwLock<HashMap<Txid, Mutex<HashMap<([u8;32], PublicKey), bool>>>>,
+ funding_batch_states: FairRwLock<HashMap<Txid, Mutex<HashMap<([u8;32], PublicKey), bool>>>>,
/// Remaining channels in a funding batch need to be closed when one channel closes.
/// These batches are maintained here to be periodically processed to simplify locking behavior.
- v1_funding_batches_to_be_closed: Mutex<Vec<Txid>>,
+ funding_batches_to_be_closed: Mutex<Vec<Txid>>,

background_events_processed_since_startup: AtomicBool,
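
For orientation, here is a minimal standalone sketch of the shape of the two fields added above, using std types in place of rust-lightning's `FairRwLock` and the real bitcoin/secp256k1 types (every name below is an illustrative stand-in, not the actual API):

use std::collections::HashMap;
use std::sync::{Mutex, RwLock};

// Illustrative stand-ins for Txid, the channel id and the counterparty's PublicKey.
type Txid = [u8; 32];
type ChannelId = [u8; 32];
type CounterpartyNodeId = [u8; 33];

// funding txid -> (channel id, counterparty) -> "funding_signed received and monitor persisted"
type FundingBatchStates =
    RwLock<HashMap<Txid, Mutex<HashMap<(ChannelId, CounterpartyNodeId), bool>>>>;

// Batches whose remaining channels still need to be closed because one member
// channel closed; drained periodically to keep locking simple.
type FundingBatchesToBeClosed = Mutex<Vec<Txid>>;
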
@@ -1813,10 +1813,10 @@ macro_rules! update_maps_on_chan_removal {
short_to_chan_info.remove(&$channel_context.outbound_scid_alias());
// If the channel was part of a batch funding transaction, all channels in that
// batch are affected.
- let v1_funding_batch_states = $self.v1_funding_batch_states.read().unwrap();
+ let funding_batch_states = $self.funding_batch_states.read().unwrap();
$channel_context.unbroadcasted_funding_txid().map(|txid| {
- if v1_funding_batch_states.contains_key(&txid) {
- $self.v1_funding_batches_to_be_closed.lock().unwrap().push(txid);
+ if funding_batch_states.contains_key(&txid) {
+ $self.funding_batches_to_be_closed.lock().unwrap().push(txid);
}
})
}}
@@ -1967,9 +1967,9 @@ macro_rules! handle_monitor_update_completion {
// should be updated as we have received funding_signed and persisted the monitor.
let mut completed_batch = None;
{
- let v1_funding_batch_states = $self.v1_funding_batch_states.read().unwrap();
+ let funding_batch_states = $self.funding_batch_states.read().unwrap();
let batch_state_key_value = $chan.context.unbroadcasted_funding_txid()
- .and_then(|txid| v1_funding_batch_states.get_key_value(&txid));
+ .and_then(|txid| funding_batch_states.get_key_value(&txid));
if let Some((txid, batch_state)) = batch_state_key_value {
let mut batch_state = batch_state.lock().unwrap();
batch_state.insert(
@@ -2019,7 +2019,7 @@ macro_rules! handle_monitor_update_completion {
// When all channels in a batched funding transaction have become ready, it is not necessary
// to track the progress of the batch anymore and the state of the channels can be updated.
if let Some(txid) = completed_batch {
- let other_channel_ids = $self.v1_funding_batch_states.write().unwrap()
+ let other_channel_ids = $self.funding_batch_states.write().unwrap()
.remove(&txid)
.map(|batch_state| batch_state.into_inner().unwrap().into_iter().map(|(k, _)| k))
.into_iter().flatten()
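
Taken together, the two hunks above mark the current channel's entry in its batch as complete and, once every entry in the batch is complete, drop the batch from the map. A rough self-contained sketch of that flow follows, with simplified types and a hypothetical helper name rather than the actual `ChannelManager` fields or locking:

use std::collections::HashMap;

type Txid = [u8; 32];
type ChannelKey = ([u8; 32], [u8; 33]); // (channel id, counterparty node id stand-in)

fn on_funding_signed_monitor_persisted(
    batches: &mut HashMap<Txid, HashMap<ChannelKey, bool>>,
    funding_txid: Txid,
    channel: ChannelKey,
) {
    let mut completed = false;
    if let Some(batch) = batches.get_mut(&funding_txid) {
        // Record that this channel received funding_signed and persisted its monitor.
        batch.insert(channel, true);
        completed = batch.values().all(|done| *done);
    }
    if completed {
        // Every channel in the batch is ready; the batch no longer needs tracking
        // (the diff above then goes on to update the other channels in the batch).
        batches.remove(&funding_txid);
    }
}
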
@@ -2254,8 +2254,8 @@ where
total_consistency_lock: RwLock::new(()),
background_events_processed_since_startup: AtomicBool::new(false),
persistence_notifier: Notifier::new(),
- v1_funding_batch_states: FairRwLock::new(HashMap::new()),
- v1_funding_batches_to_be_closed: Mutex::new(Vec::new()),
+ funding_batch_states: FairRwLock::new(HashMap::new()),
+ funding_batches_to_be_closed: Mutex::new(Vec::new()),

entropy_source,
node_signer,
@@ -3659,7 +3659,7 @@ where
}

let is_batch_funding = temporary_channels.len() > 1;
- let v1_funding_batch_state = RefCell::new(HashMap::new());
+ let funding_batch_state = RefCell::new(HashMap::new());
for (temporary_channel_id, counterparty_node_id) in temporary_channels {
result = result.and_then(|_| self.funding_transaction_generated_intern(
temporary_channel_id,
@@ -3684,7 +3684,7 @@ where
});
}
let outpoint = OutPoint { txid: tx.txid(), index: output_index.unwrap() };
- v1_funding_batch_state.borrow_mut().insert((outpoint.to_channel_id(), (*counterparty_node_id).clone()), false);
+ funding_batch_state.borrow_mut().insert((outpoint.to_channel_id(), (*counterparty_node_id).clone()), false);
Ok(outpoint)
})
);
@@ -3702,7 +3702,7 @@ where
self.issue_channel_close_events(&chan.context, ClosureReason::ProcessingError { err: e.clone() });
});
}
- for (channel_id, counterparty_node_id) in v1_funding_batch_state.borrow().keys() {
+ for (channel_id, counterparty_node_id) in funding_batch_state.borrow().keys() {
per_peer_state.get(counterparty_node_id)
.map(|peer_state_mutex| peer_state_mutex.lock().unwrap())
.and_then(|mut peer_state| peer_state.channel_by_id.remove(channel_id))
@@ -3713,9 +3713,9 @@ where
}
} else if is_batch_funding {
// Initialize the state of the batch.
- self.v1_funding_batch_states.write().unwrap().insert(
+ self.funding_batch_states.write().unwrap().insert(
funding_transaction.txid(),
- Mutex::new(v1_funding_batch_state.into_inner()),
+ Mutex::new(funding_batch_state.into_inner()),
);
}
result
@@ -4779,9 +4779,9 @@ where
// Close remaining channels in funding batches when one channel closes.
let mut affected_channels = Vec::new();
{
- let mut v1_funding_batch_states = self.v1_funding_batch_states.write().unwrap();
- for txid in self.v1_funding_batches_to_be_closed.lock().unwrap().drain(..) {
- affected_channels.extend(v1_funding_batch_states
+ let mut funding_batch_states = self.funding_batch_states.write().unwrap();
+ for txid in self.funding_batches_to_be_closed.lock().unwrap().drain(..) {
+ affected_channels.extend(funding_batch_states
.remove(&txid)
.map(|state| state.into_inner().unwrap().into_iter().map(|(k, _)| k))
.into_iter().flatten()
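
As a hedged standalone sketch of the drain above (simplified types and a hypothetical function name): each queued txid's batch state is removed, and every channel that shared that funding transaction is collected so the caller can close it too.

use std::collections::HashMap;

type Txid = [u8; 32];
type ChannelKey = ([u8; 32], [u8; 33]);

fn drain_batches_to_close(
    funding_batch_states: &mut HashMap<Txid, HashMap<ChannelKey, bool>>,
    funding_batches_to_be_closed: &mut Vec<Txid>,
) -> Vec<ChannelKey> {
    let mut affected_channels = Vec::new();
    for txid in funding_batches_to_be_closed.drain(..) {
        // Removing the batch yields every channel that shared this funding tx;
        // all of them must now be closed as well.
        if let Some(batch) = funding_batch_states.remove(&txid) {
            affected_channels.extend(batch.into_keys());
        }
    }
    affected_channels
}
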
@@ -5845,7 +5845,7 @@ where
match peer_state.channel_by_id.entry(msg.channel_id) {
hash_map::Entry::Occupied(mut chan) => {
let is_batch_funding = chan.get().context.unbroadcasted_funding_txid()
- .map(|txid| self.v1_funding_batch_states.read().unwrap().contains_key(&txid))
+ .map(|txid| self.funding_batch_states.read().unwrap().contains_key(&txid))
.unwrap_or(false);
let monitor = try_chan_entry!(self,
chan.get_mut().funding_signed(&msg, best_block, &self.signer_provider, is_batch_funding, &self.logger), chan);
@@ -7547,20 +7547,24 @@ where
let peer_state = &mut *peer_state_lock;

peer_state.channel_by_id.retain(|_, chan| {
- if !chan.context.is_funding_initiated() {
+ if !chan.context.is_funding_broadcast() {
+ update_maps_on_chan_removal!(self, &chan.context);
+ self.issue_channel_close_events(&chan.context, ClosureReason::DisconnectedPeer);
// It is possible to have persisted the monitor upon funding_signed
// but not have broadcast the transaction, especially for batch funding.
// The monitor should be moved to the correct state.
self.finish_force_close_channel(chan.context.force_shutdown(false));
+ false
} else {
chan.remove_uncommitted_htlcs_and_mark_paused(&self.logger);
+ if chan.is_shutdown() {
+ update_maps_on_chan_removal!(self, &chan.context);
+ self.issue_channel_close_events(&chan.context, ClosureReason::DisconnectedPeer);
+ false
+ } else {
+ true
+ }
}
- if chan.is_shutdown() {
- update_maps_on_chan_removal!(self, &chan.context);
- self.issue_channel_close_events(&chan.context, ClosureReason::DisconnectedPeer);
- return false;
- }
- true
});
peer_state.inbound_v1_channel_by_id.retain(|_, chan| {
update_maps_on_chan_removal!(self, &chan.context);
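
The reworked retain closure above returns its verdict from each branch instead of falling through to the old trailing `is_shutdown()` check after force-closing a channel whose funding was never broadcast. A minimal sketch of the resulting control flow, with a hypothetical `Chan` type standing in for the real `Channel`:

use std::collections::HashMap;

struct Chan {
    funding_broadcast: bool,
    shutdown: bool,
}

impl Chan {
    fn force_shutdown(&mut self) { self.shutdown = true; }
    fn pause(&mut self) { /* drop uncommitted HTLCs, mark paused */ }
}

fn on_peer_disconnected(channels: &mut HashMap<u64, Chan>) {
    channels.retain(|_, chan| {
        if !chan.funding_broadcast {
            // Funding never broadcast (e.g. an incomplete batch): close and drop now.
            chan.force_shutdown();
            false
        } else {
            chan.pause();
            // Keep the channel unless it has already shut down.
            !chan.shutdown
        }
    });
}
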
@@ -8365,7 +8369,7 @@ where
}
number_of_channels += peer_state.channel_by_id.len();
for (_, channel) in peer_state.channel_by_id.iter() {
- if !channel.context.is_funding_initiated() {
+ if !channel.context.is_funding_broadcast() {
unfunded_channels += 1;
}
}
@@ -8377,7 +8381,7 @@ where
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
let peer_state = &mut *peer_state_lock;
for (_, channel) in peer_state.channel_by_id.iter() {
- if channel.context.is_funding_initiated() {
+ if channel.context.is_funding_broadcast() {
channel.write(writer)?;
}
}
@@ -8824,7 +8828,7 @@ where
if let Some(short_channel_id) = channel.context.get_short_channel_id() {
short_to_chan_info.insert(short_channel_id, (channel.context.get_counterparty_node_id(), channel.context.channel_id()));
}
- if channel.context.is_funding_initiated() {
+ if channel.context.is_funding_broadcast() {
id_to_peer.insert(channel.context.channel_id(), channel.context.get_counterparty_node_id());
}
match peer_channels.entry(channel.context.get_counterparty_node_id()) {
@@ -9521,8 +9525,8 @@ where
total_consistency_lock: RwLock::new(()),
background_events_processed_since_startup: AtomicBool::new(false),
persistence_notifier: Notifier::new(),
- v1_funding_batch_states: FairRwLock::new(HashMap::new()),
- v1_funding_batches_to_be_closed: Mutex::new(Vec::new()),
+ funding_batch_states: FairRwLock::new(HashMap::new()),
+ funding_batches_to_be_closed: Mutex::new(Vec::new()),

entropy_source: args.entropy_source,
node_signer: args.node_signer,