Skip to content

Commit 428d21b

Browse files
f - remove peers through timer_tick_occurred
1 parent ff18bf3 commit 428d21b

File tree

1 file changed

+32
-54
lines changed

1 file changed

+32
-54
lines changed

lightning/src/ln/channelmanager.rs

Lines changed: 32 additions & 54 deletions
Original file line numberDiff line numberDiff line change
@@ -620,8 +620,6 @@ pub type SimpleRefChannelManager<'a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, M, T, F, L> = C
620620
// | |
621621
// | |__`best_block`
622622
// | |
623-
// | |__`pending_peers_awaiting_removal`
624-
// | |
625623
// | |__`pending_events`
626624
// | |
627625
// | |__`pending_background_events`
@@ -789,16 +787,6 @@ where
789787

790788
/// See `ChannelManager` struct-level documentation for lock order requirements.
791789
pending_events: Mutex<Vec<events::Event>>,
792-
/// When a peer disconnects but still has channels, the peer's `peer_state` entry in the
793-
/// `per_peer_state` is not removed by the `peer_disconnected` function. If the channels of
794-
/// to that peer is later closed while still being disconnected (i.e. force closed), we
795-
/// therefore need to remove the peer from `peer_state` separately.
796-
/// To avoid having to take the `per_peer_state` `write` lock once the channels are closed, we
797-
/// instead store such peers awaiting removal in this field, and remove them on a timer to
798-
/// limit the negative effects on parallelism as much as possible.
799-
///
800-
/// See `ChannelManager` struct-level documentation for lock order requirements.
801-
pending_peers_awaiting_removal: Mutex<HashSet<PublicKey>>,
802790
/// See `ChannelManager` struct-level documentation for lock order requirements.
803791
pending_background_events: Mutex<Vec<BackgroundEvent>>,
804792
/// Used when we have to take a BIG lock to make sure everything is self-consistent.
@@ -1329,11 +1317,10 @@ macro_rules! try_chan_entry {
13291317
}
13301318

13311319
macro_rules! remove_channel {
1332-
($self: expr, $entry: expr, $peer_state: expr) => {
1320+
($self: expr, $entry: expr) => {
13331321
{
13341322
let channel = $entry.remove_entry().1;
13351323
update_maps_on_chan_removal!($self, channel);
1336-
$self.add_pending_peer_to_be_removed(channel.get_counterparty_node_id(), $peer_state);
13371324
channel
13381325
}
13391326
}
@@ -1506,7 +1493,6 @@ where
15061493
per_peer_state: FairRwLock::new(HashMap::new()),
15071494

15081495
pending_events: Mutex::new(Vec::new()),
1509-
pending_peers_awaiting_removal: Mutex::new(HashSet::new()),
15101496
pending_background_events: Mutex::new(Vec::new()),
15111497
total_consistency_lock: RwLock::new(()),
15121498
persistence_notifier: Notifier::new(),
@@ -1745,7 +1731,7 @@ where
17451731
let (result, is_permanent) =
17461732
handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
17471733
if is_permanent {
1748-
remove_channel!(self, chan_entry, peer_state);
1734+
remove_channel!(self, chan_entry);
17491735
break result;
17501736
}
17511737
}
@@ -1756,7 +1742,7 @@ where
17561742
});
17571743

17581744
if chan_entry.get().is_shutdown() {
1759-
let channel = remove_channel!(self, chan_entry, peer_state);
1745+
let channel = remove_channel!(self, chan_entry);
17601746
if let Ok(channel_update) = self.get_channel_update_for_broadcast(&channel) {
17611747
peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
17621748
msg: channel_update
@@ -1859,7 +1845,7 @@ where
18591845
} else {
18601846
self.issue_channel_close_events(chan.get(),ClosureReason::HolderForceClosed);
18611847
}
1862-
remove_channel!(self, chan, peer_state)
1848+
remove_channel!(self, chan)
18631849
} else {
18641850
return Err(APIError::ChannelUnavailable{ err: format!("Channel with id {} not found for the passed counterparty node_id {}", log_bytes!(*channel_id), peer_node_id) });
18651851
}
@@ -1898,13 +1884,6 @@ where
18981884
}
18991885
}
19001886

1901-
fn add_pending_peer_to_be_removed(&self, counterparty_node_id: PublicKey, peer_state: &mut PeerState<<SP::Target as SignerProvider>::Signer>) {
1902-
let peer_should_be_removed = !peer_state.is_connected && peer_state.channel_by_id.len() == 0;
1903-
if peer_should_be_removed {
1904-
self.pending_peers_awaiting_removal.lock().unwrap().insert(counterparty_node_id);
1905-
}
1906-
}
1907-
19081887
/// Force closes a channel, immediately broadcasting the latest local transaction(s) and
19091888
/// rejecting new HTLCs on the given channel. Fails if `channel_id` is unknown to
19101889
/// the manager, or if the `counterparty_node_id` isn't the counterparty of the corresponding
@@ -3358,16 +3337,20 @@ where
33583337
true
33593338
}
33603339

3361-
/// Removes peers which have been been added to `pending_peers_awaiting_removal` which are
3362-
/// still disconnected and we have no channels to.
3340+
/// When a peer disconnects but still has channels, the peer's `peer_state` entry in the
3341+
/// `per_peer_state` is not removed by the `peer_disconnected` function. If the channels
3342+
/// to that peer are later closed while still being disconnected (i.e. force closed), we
3343+
/// therefore need to remove the peer from `peer_state` separately.
3344+
/// To avoid having to take the `per_peer_state` `write` lock once the channels are closed, we
3345+
/// instead remove such peers awaiting removal through this function, which is called on a
3346+
/// timer through `timer_tick_occurred`, passing the disconnected peers with no channels,
3347+
/// to limit the negative effects on parallelism as much as possible.
33633348
///
33643349
/// Must be called without the `per_peer_state` lock acquired.
3365-
fn remove_peers_awaiting_removal(&self) {
3366-
let mut pending_peers_awaiting_removal = HashSet::new();
3367-
mem::swap(&mut *self.pending_peers_awaiting_removal.lock().unwrap(), &mut pending_peers_awaiting_removal);
3350+
fn remove_peers_awaiting_removal(&self, pending_peers_awaiting_removal: HashSet<PublicKey>) {
33683351
if pending_peers_awaiting_removal.len() > 0 {
33693352
let mut per_peer_state = self.per_peer_state.write().unwrap();
3370-
for counterparty_node_id in pending_peers_awaiting_removal.drain() {
3353+
for counterparty_node_id in pending_peers_awaiting_removal {
33713354
match per_peer_state.entry(counterparty_node_id) {
33723355
hash_map::Entry::Occupied(entry) => {
33733356
// Remove the entry if the peer is still disconnected and we still
@@ -3446,6 +3429,7 @@ where
34463429
/// the channel.
34473430
/// * Expiring a channel's previous `ChannelConfig` if necessary to only allow forwarding HTLCs
34483431
/// with the current `ChannelConfig`.
3432+
/// * Removing peers which have disconnected but no longer have any channels.
34493433
///
34503434
/// Note that this may cause reentrancy through `chain::Watch::update_channel` calls or feerate
34513435
/// estimate fetches.
@@ -3458,6 +3442,7 @@ where
34583442

34593443
let mut handle_errors: Vec<(Result<(), _>, _)> = Vec::new();
34603444
let mut timed_out_mpp_htlcs = Vec::new();
3445+
let mut pending_peers_awaiting_removal = HashSet::new();
34613446
{
34623447
let per_peer_state = self.per_peer_state.read().unwrap();
34633448
for (counterparty_node_id, peer_state_mutex) in per_peer_state.iter() {
@@ -3505,10 +3490,13 @@ where
35053490

35063491
true
35073492
});
3508-
self.add_pending_peer_to_be_removed(counterparty_node_id, peer_state);
3493+
let peer_should_be_removed = !peer_state.is_connected && peer_state.channel_by_id.len() == 0;
3494+
if peer_should_be_removed {
3495+
pending_peers_awaiting_removal.insert(counterparty_node_id);
3496+
}
35093497
}
35103498
}
3511-
self.remove_peers_awaiting_removal();
3499+
self.remove_peers_awaiting_removal(pending_peers_awaiting_removal);
35123500

35133501
self.claimable_payments.lock().unwrap().claimable_htlcs.retain(|payment_hash, (_, htlcs)| {
35143502
if htlcs.is_empty() {
@@ -4263,7 +4251,7 @@ where
42634251
}
42644252
};
42654253
peer_state.pending_msg_events.push(send_msg_err_event);
4266-
let _ = remove_channel!(self, channel, peer_state);
4254+
let _ = remove_channel!(self, channel);
42674255
return Err(APIError::APIMisuseError { err: "Please use accept_inbound_channel_from_trusted_peer_0conf to accept channels with zero confirmations.".to_owned() });
42684256
}
42694257

@@ -4549,7 +4537,7 @@ where
45494537
let (result, is_permanent) =
45504538
handle_monitor_update_res!(self, update_res, chan_entry.get_mut(), RAACommitmentOrder::CommitmentFirst, chan_entry.key(), NO_UPDATE);
45514539
if is_permanent {
4552-
remove_channel!(self, chan_entry, peer_state);
4540+
remove_channel!(self, chan_entry);
45534541
break result;
45544542
}
45554543
}
@@ -4598,7 +4586,7 @@ where
45984586
// also implies there are no pending HTLCs left on the channel, so we can
45994587
// fully delete it from tracking (the channel monitor is still around to
46004588
// watch for old state broadcasts)!
4601-
(tx, Some(remove_channel!(self, chan_entry, peer_state)))
4589+
(tx, Some(remove_channel!(self, chan_entry)))
46024590
} else { (tx, None) }
46034591
},
46044592
hash_map::Entry::Vacant(_) => return Err(MsgHandleErrInternal::send_err_msg_no_close(format!("Got a message for a channel from the wrong node! No such channel for the passed counterparty_node_id {}", counterparty_node_id), msg.channel_id))
@@ -5101,11 +5089,12 @@ where
51015089
if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
51025090
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
51035091
let peer_state = &mut *peer_state_lock;
5092+
let pending_msg_events = &mut peer_state.pending_msg_events;
51045093
if let hash_map::Entry::Occupied(chan_entry) = peer_state.channel_by_id.entry(funding_outpoint.to_channel_id()) {
5105-
let mut chan = remove_channel!(self, chan_entry, peer_state);
5094+
let mut chan = remove_channel!(self, chan_entry);
51065095
failed_channels.push(chan.force_shutdown(false));
51075096
if let Ok(update) = self.get_channel_update_for_broadcast(&chan) {
5108-
peer_state.pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
5097+
pending_msg_events.push(events::MessageSendEvent::BroadcastChannelUpdate {
51095098
msg: update
51105099
});
51115100
}
@@ -5115,7 +5104,7 @@ where
51155104
ClosureReason::CommitmentTxConfirmed
51165105
};
51175106
self.issue_channel_close_events(&chan, reason);
5118-
peer_state.pending_msg_events.push(events::MessageSendEvent::HandleError {
5107+
pending_msg_events.push(events::MessageSendEvent::HandleError {
51195108
node_id: chan.get_counterparty_node_id(),
51205109
action: msgs::ErrorAction::SendErrorMessage {
51215110
msg: msgs::ErrorMessage { channel_id: chan.channel_id(), data: "Channel force-closed".to_owned() }
@@ -5157,7 +5146,7 @@ where
51575146
{
51585147
let per_peer_state = self.per_peer_state.read().unwrap();
51595148

5160-
for (cp_id, peer_state_mutex) in per_peer_state.iter() {
5149+
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
51615150
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
51625151
let peer_state = &mut *peer_state_lock;
51635152
let pending_msg_events = &mut peer_state.pending_msg_events;
@@ -5197,7 +5186,6 @@ where
51975186
}
51985187
}
51995188
});
5200-
self.add_pending_peer_to_be_removed(*cp_id, peer_state);
52015189
}
52025190
}
52035191

@@ -5222,7 +5210,7 @@ where
52225210
{
52235211
let per_peer_state = self.per_peer_state.read().unwrap();
52245212

5225-
for (cp_id, peer_state_mutex) in per_peer_state.iter() {
5213+
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
52265214
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
52275215
let peer_state = &mut *peer_state_lock;
52285216
let pending_msg_events = &mut peer_state.pending_msg_events;
@@ -5260,7 +5248,6 @@ where
52605248
}
52615249
}
52625250
});
5263-
self.add_pending_peer_to_be_removed(*cp_id, peer_state);
52645251
}
52655252
}
52665253

@@ -5836,7 +5823,7 @@ where
58365823
let mut timed_out_htlcs = Vec::new();
58375824
{
58385825
let per_peer_state = self.per_peer_state.read().unwrap();
5839-
for (cp_id, peer_state_mutex) in per_peer_state.iter() {
5826+
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
58405827
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
58415828
let peer_state = &mut *peer_state_lock;
58425829
let pending_msg_events = &mut peer_state.pending_msg_events;
@@ -5920,7 +5907,6 @@ where
59205907
}
59215908
true
59225909
});
5923-
self.add_pending_peer_to_be_removed(*cp_id, peer_state);
59245910
}
59255911
}
59265912

@@ -6248,7 +6234,7 @@ where
62486234

62496235
let per_peer_state = self.per_peer_state.read().unwrap();
62506236

6251-
for (cp_id, peer_state_mutex) in per_peer_state.iter() {
6237+
for (_cp_id, peer_state_mutex) in per_peer_state.iter() {
62526238
let mut peer_state_lock = peer_state_mutex.lock().unwrap();
62536239
let peer_state = &mut *peer_state_lock;
62546240
let pending_msg_events = &mut peer_state.pending_msg_events;
@@ -6280,7 +6266,6 @@ where
62806266
}
62816267
retain
62826268
});
6283-
self.add_pending_peer_to_be_removed(*cp_id, peer_state);
62846269
}
62856270
//TODO: Also re-broadcast announcement_signatures
62866271
Ok(())
@@ -6794,8 +6779,6 @@ where
67946779

67956780
write_ver_prefix!(writer, SERIALIZATION_VERSION, MIN_SERIALIZATION_VERSION);
67966781

6797-
self.remove_peers_awaiting_removal();
6798-
67996782
self.genesis_hash.write(writer)?;
68006783
{
68016784
let best_block = self.best_block.read().unwrap();
@@ -7620,7 +7603,6 @@ where
76207603
per_peer_state: FairRwLock::new(per_peer_state),
76217604

76227605
pending_events: Mutex::new(pending_events_read),
7623-
pending_peers_awaiting_removal: Mutex::new(HashSet::new()),
76247606
pending_background_events: Mutex::new(pending_background_events_read),
76257607
total_consistency_lock: RwLock::new(()),
76267608
persistence_notifier: Notifier::new(),
@@ -8105,9 +8087,6 @@ mod tests {
81058087
// Assert that nodes[1] is awaiting removal for nodes[0] once nodes[1] has been
81068088
// disconnected and the channel between has been force closed.
81078089
let nodes_0_per_peer_state = nodes[0].node.per_peer_state.read().unwrap();
8108-
let nodes_0_pending_peers_awaiting_removal = nodes[0].node.pending_peers_awaiting_removal.lock().unwrap();
8109-
assert_eq!(nodes_0_pending_peers_awaiting_removal.len(), 1);
8110-
assert!(nodes_0_pending_peers_awaiting_removal.get(&nodes[1].node.get_our_node_id()).is_some());
81118090
// Assert that nodes[1] isn't removed before `timer_tick_occurred` has been executed.
81128091
assert_eq!(nodes_0_per_peer_state.len(), 1);
81138092
assert!(nodes_0_per_peer_state.get(&nodes[1].node.get_our_node_id()).is_some());
@@ -8118,7 +8097,6 @@ mod tests {
81188097
{
81198098
// Assert that nodes[1] has now been removed.
81208099
assert_eq!(nodes[0].node.per_peer_state.read().unwrap().len(), 0);
8121-
assert_eq!(nodes[0].node.pending_peers_awaiting_removal.lock().unwrap().len(), 0);
81228100
}
81238101
}
81248102

0 commit comments

Comments
 (0)