@@ -2051,11 +2051,11 @@ macro_rules! handle_new_monitor_update {
 			ChannelMonitorUpdateStatus::InProgress => {
 				log_debug!($self.logger, "ChannelMonitor update for {} in flight, holding messages until the update completes.",
 					&$chan.context.channel_id());
-				Ok(false)
+				false
 			},
 			ChannelMonitorUpdateStatus::Completed => {
 				$completed;
-				Ok(true)
+				true
 			},
 		}
 	} };
@@ -2070,14 +2070,9 @@ macro_rules! handle_new_monitor_update {
 				$per_peer_state_lock, chan, MANUALLY_REMOVING_INITIAL_MONITOR, { $chan_entry.remove() })
 		} else {
 			// We're not supposed to handle monitor updates for unfunded channels (they have no monitors to
-			// update).
-			debug_assert!(false);
-			let channel_id = *$chan_entry.key();
-			let (_, err) = convert_chan_phase_err!($self, ChannelError::Close(
-				"Cannot update monitor for unfunded channels as they don't have monitors yet".into()),
-				$chan_entry.get_mut(), &channel_id);
-			$chan_entry.remove();
-			Err(err)
+			// update). Throwing away a monitor update could be dangerous, so we assert even in
+			// release builds.
+			panic!("Initial Monitors should not exist for non-funded channels");
 		}
 	};
 	($self: ident, $funding_txo: expr, $update: expr, $peer_state_lock: expr, $peer_state: expr, $per_peer_state_lock: expr, $chan: expr, MANUALLY_REMOVING, $remove: expr) => { {
@@ -2107,14 +2102,9 @@ macro_rules! handle_new_monitor_update {
 				$per_peer_state_lock, chan, MANUALLY_REMOVING, { $chan_entry.remove() })
 		} else {
 			// We're not supposed to handle monitor updates for unfunded channels (they have no monitors to
-			// update).
-			debug_assert!(false);
-			let channel_id = *$chan_entry.key();
-			let (_, err) = convert_chan_phase_err!($self, ChannelError::Close(
-				"Cannot update monitor for unfunded channels as they don't have monitors yet".into()),
-				$chan_entry.get_mut(), &channel_id);
-			$chan_entry.remove();
-			Err(err)
+			// update). Throwing away a monitor update could be dangerous, so we assert even in
+			// release builds.
+			panic!("Monitor updates should not exist for non-funded channels");
 		}
 	}
 }
@@ -2529,7 +2519,7 @@ where
 		let _persistence_guard = PersistenceNotifierGuard::notify_on_drop(self);
 
 		let mut failed_htlcs: Vec<(HTLCSource, PaymentHash)>;
-		let result: Result<(), _> = loop {
+		loop {
 			{
 				let per_peer_state = self.per_peer_state.read().unwrap();
 
@@ -2558,8 +2548,9 @@ where
 
 							// Update the monitor with the shutdown script if necessary.
 							if let Some(monitor_update) = monitor_update_opt.take() {
-								break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
-									peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ());
+								handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+									peer_state_lock, peer_state, per_peer_state, chan_phase_entry);
+								break;
 							}
 
 							if chan.is_shutdown() {
@@ -2572,7 +2563,7 @@ where
 									self.issue_channel_close_events(&chan.context, ClosureReason::HolderForceClosed);
 								}
 							}
-							break Ok(());
+							break;
 						}
 					},
 					hash_map::Entry::Vacant(_) => (),
@@ -2591,7 +2582,6 @@ where
 			self.fail_htlc_backwards_internal(&htlc_source.0, &htlc_source.1, &reason, receiver);
 		}
 
-		let _ = handle_error!(self, result, *counterparty_node_id);
 		Ok(())
 	}
 
@@ -3334,8 +3324,7 @@ where
 				match break_chan_phase_entry!(self, send_res, chan_phase_entry) {
 					Some(monitor_update) => {
 						match handle_new_monitor_update!(self, funding_txo, monitor_update, peer_state_lock, peer_state, per_peer_state, chan_phase_entry) {
-							Err(e) => break Err(e),
-							Ok(false) => {
+							false => {
 								// Note that MonitorUpdateInProgress here indicates (per function
 								// docs) that we will resend the commitment update once monitor
 								// updating completes. Therefore, we must return an error
@@ -3344,7 +3333,7 @@ where
 								// MonitorUpdateInProgress, below.
 								return Err(APIError::MonitorUpdateInProgress);
 							},
-							Ok(true) => {},
+							true => {},
 						}
 					},
 					None => {},
@@ -4526,7 +4515,7 @@ where
 				},
 				BackgroundEvent::MonitorUpdateRegeneratedOnStartup { counterparty_node_id, funding_txo, update } => {
 					let mut updated_chan = false;
-					let res = {
+					{
 						let per_peer_state = self.per_peer_state.read().unwrap();
 						if let Some(peer_state_mutex) = per_peer_state.get(&counterparty_node_id) {
 							let mut peer_state_lock = peer_state_mutex.lock().unwrap();
@@ -4535,24 +4524,16 @@ where
 								hash_map::Entry::Occupied(mut chan_phase) => {
 									updated_chan = true;
 									handle_new_monitor_update!(self, funding_txo, update.clone(),
-										peer_state_lock, peer_state, per_peer_state, chan_phase).map(|_| ())
+										peer_state_lock, peer_state, per_peer_state, chan_phase);
 								},
-								hash_map::Entry::Vacant(_) => Ok(()),
+								hash_map::Entry::Vacant(_) => {},
 							}
-						} else { Ok(()) }
-					};
+						}
+					}
 					if !updated_chan {
 						// TODO: Track this as in-flight even though the channel is closed.
 						let _ = self.chain_monitor.update_channel(funding_txo, &update);
 					}
-					// TODO: If this channel has since closed, we're likely providing a payment
-					// preimage update, which we must ensure is durable! We currently don't,
-					// however, ensure that.
-					if res.is_err() {
-						log_error!(self.logger,
-							"Failed to provide ChannelMonitorUpdate to closed channel! This likely lost us a payment preimage!");
-					}
-					let _ = handle_error!(self, res, counterparty_node_id);
 				},
 				BackgroundEvent::MonitorUpdatesComplete { counterparty_node_id, channel_id } => {
 					let per_peer_state = self.per_peer_state.read().unwrap();
@@ -5275,15 +5256,8 @@ where
 				peer_state.monitor_update_blocked_actions.entry(chan_id).or_insert(Vec::new()).push(action);
 			}
 			if !during_init {
-				let res = handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
+				handle_new_monitor_update!(self, prev_hop.outpoint, monitor_update, peer_state_lock,
 					peer_state, per_peer_state, chan_phase_entry);
-				if let Err(e) = res {
-					// TODO: This is a *critical* error - we probably updated the outbound edge
-					// of the HTLC's monitor with a preimage. We should retry this monitor
-					// update over and over again until morale improves.
-					log_error!(self.logger, "Failed to update channel monitor with preimage {:?}", payment_preimage);
-					return Err((counterparty_node_id, e));
-				}
 			} else {
 				// If we're running during init we cannot update a monitor directly -
 				// they probably haven't actually been loaded yet. Instead, push the
@@ -5934,24 +5908,13 @@ where
 				});
 
 				if let ChannelPhase::Funded(chan) = e.insert(ChannelPhase::Funded(chan)) {
-					let mut res = handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state,
+					handle_new_monitor_update!(self, persist_state, peer_state_lock, peer_state,
 						per_peer_state, chan, MANUALLY_REMOVING_INITIAL_MONITOR,
 						{ peer_state.channel_by_id.remove(&new_channel_id) });
-
-					// Note that we reply with the new channel_id in error messages if we gave up on the
-					// channel, not the temporary_channel_id. This is compatible with ourselves, but the
-					// spec is somewhat ambiguous here. Not a huge deal since we'll send error messages for
-					// any messages referencing a previously-closed channel anyway.
-					// We do not propagate the monitor update to the user as it would be for a monitor
-					// that we didn't manage to store (and that we don't care about - we don't respond
-					// with the funding_signed so the channel can never go on chain).
-					if let Err(MsgHandleErrInternal { shutdown_finish: Some((res, _)), .. }) = &mut res {
-						res.0 = None;
-					}
-					res.map(|_| ())
 				} else {
 					unreachable!("This must be a funded channel as we just inserted it.");
 				}
+				Ok(())
 			} else {
 				log_error!(self.logger, "Persisting initial ChannelMonitor failed, implying the funding outpoint was duplicated");
 				return Err(MsgHandleErrInternal::send_err_msg_no_close(
@@ -5982,16 +5945,8 @@ where
 				let monitor = try_chan_phase_entry!(self,
 					chan.funding_signed(&msg, best_block, &self.signer_provider, &self.logger), chan_phase_entry);
 				if let Ok(persist_status) = self.chain_monitor.watch_channel(chan.context.get_funding_txo().unwrap(), monitor) {
-					let mut res = handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan_phase_entry, INITIAL_MONITOR);
-					if let Err(MsgHandleErrInternal { ref mut shutdown_finish, .. }) = res {
-						// We weren't able to watch the channel to begin with, so no updates should be made on
-						// it. Previously, full_stack_target found an (unreachable) panic when the
-						// monitor update contained within `shutdown_finish` was applied.
-						if let Some((ref mut shutdown_finish, _)) = shutdown_finish {
-							shutdown_finish.0.take();
-						}
-					}
-					res.map(|_| ())
+					handle_new_monitor_update!(self, persist_status, peer_state_lock, peer_state, per_peer_state, chan_phase_entry, INITIAL_MONITOR);
+					Ok(())
 				} else {
 					try_chan_phase_entry!(self, Err(ChannelError::Close("Channel funding outpoint was a duplicate".to_owned())), chan_phase_entry)
 				}
@@ -6096,8 +6051,8 @@ where
 					}
 					// Update the monitor with the shutdown script if necessary.
 					if let Some(monitor_update) = monitor_update_opt {
-						break handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
-							peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ());
+						handle_new_monitor_update!(self, funding_txo_opt.unwrap(), monitor_update,
+							peer_state_lock, peer_state, per_peer_state, chan_phase_entry);
 					}
 					break Ok(());
 				},
@@ -6350,8 +6305,9 @@ where
 					let monitor_update_opt = try_chan_phase_entry!(self, chan.commitment_signed(&msg, &self.logger), chan_phase_entry);
 					if let Some(monitor_update) = monitor_update_opt {
 						handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update, peer_state_lock,
-							peer_state, per_peer_state, chan_phase_entry).map(|_| ())
-					} else { Ok(()) }
+							peer_state, per_peer_state, chan_phase_entry);
+					}
+					Ok(())
 				} else {
 					return try_chan_phase_entry!(self, Err(ChannelError::Close(
 						"Got a commitment_signed message for an unfunded channel!".into())), chan_phase_entry);
@@ -6519,13 +6475,13 @@ where
 					} else { false };
 					let (htlcs_to_fail, monitor_update_opt) = try_chan_phase_entry!(self,
 						chan.revoke_and_ack(&msg, &self.fee_estimator, &self.logger, mon_update_blocked), chan_phase_entry);
-					let res = if let Some(monitor_update) = monitor_update_opt {
+					if let Some(monitor_update) = monitor_update_opt {
 						let funding_txo = funding_txo_opt
 							.expect("Funding outpoint must have been set for RAA handling to succeed");
 						handle_new_monitor_update!(self, funding_txo, monitor_update,
-							peer_state_lock, peer_state, per_peer_state, chan_phase_entry).map(|_| ())
-					} else { Ok(()) };
-					(htlcs_to_fail, res)
+							peer_state_lock, peer_state, per_peer_state, chan_phase_entry);
+					}
+					(htlcs_to_fail, Ok(()))
 				} else {
 					return try_chan_phase_entry!(self, Err(ChannelError::Close(
 						"Got a revoke_and_ack message for an unfunded channel!".into())), chan_phase_entry);
@@ -6796,7 +6752,6 @@ where
 	fn check_free_holding_cells(&self) -> bool {
 		let mut has_monitor_update = false;
 		let mut failed_htlcs = Vec::new();
-		let mut handle_errors = Vec::new();
 
 		// Walk our list of channels and find any that need to update. Note that when we do find an
 		// update, if it includes actions that must be taken afterwards, we have to drop the
@@ -6822,12 +6777,9 @@ where
 						has_monitor_update = true;
 
 						let channel_id: ChannelId = *channel_id;
-						let res = handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
+						handle_new_monitor_update!(self, funding_txo.unwrap(), monitor_update,
 							peer_state_lock, peer_state, per_peer_state, chan, MANUALLY_REMOVING,
 							peer_state.channel_by_id.remove(&channel_id));
-						if res.is_err() {
-							handle_errors.push((counterparty_node_id, res));
-						}
 						continue 'peer_loop;
 					}
 				}
@@ -6837,15 +6789,11 @@ where
 			break 'peer_loop;
 		}
 
-		let has_update = has_monitor_update || !failed_htlcs.is_empty() || !handle_errors.is_empty();
+		let has_update = has_monitor_update || !failed_htlcs.is_empty();
 		for (failures, channel_id, counterparty_node_id) in failed_htlcs.drain(..) {
 			self.fail_holding_cell_htlcs(failures, channel_id, &counterparty_node_id);
 		}
 
-		for (counterparty_node_id, err) in handle_errors.drain(..) {
-			let _ = handle_error!(self, err, counterparty_node_id);
-		}
-
 		has_update
 	}
 
@@ -7172,11 +7120,8 @@ where
 							if let Some((monitor_update, further_update_exists)) = chan.unblock_next_blocked_monitor_update() {
 								log_debug!(self.logger, "Unlocking monitor updating for channel {} and updating monitor",
 									channel_funding_outpoint.to_channel_id());
-								if let Err(e) = handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
-									peer_state_lck, peer_state, per_peer_state, chan_phase_entry)
-								{
-									errors.push((e, counterparty_node_id));
-								}
+								handle_new_monitor_update!(self, channel_funding_outpoint, monitor_update,
+									peer_state_lck, peer_state, per_peer_state, chan_phase_entry);
 								if further_update_exists {
 									// If there are more `ChannelMonitorUpdate`s to process, restart at the
 									// top of the loop.
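The net effect of the hunks above: `handle_new_monitor_update!` now evaluates to a plain `bool` (`true` for `Completed`, `false` for `InProgress`) instead of a `Result`, callers stop threading errors through `handle_error!`, and an update against an unfunded channel becomes an unconditional `panic!` rather than a closed-channel error. Below is a minimal, self-contained sketch of the new contract, with a plain function standing in for the macro; the names and signature here are illustrative, not LDK's actual API.

```rust
/// Stand-in for the `ChannelMonitorUpdateStatus` variants handled in the diff above.
enum ChannelMonitorUpdateStatus { InProgress, Completed }

/// Models the macro's new behavior: returns whether the update completed, and
/// panics (even in release builds) if the channel has no monitor, since
/// silently throwing away a monitor update could be dangerous.
fn handle_new_monitor_update(status: ChannelMonitorUpdateStatus, funded: bool) -> bool {
	if !funded {
		panic!("Monitor updates should not exist for non-funded channels");
	}
	match status {
		// In-flight: messages are held until the update completes; this is
		// no longer surfaced to callers as an error.
		ChannelMonitorUpdateStatus::InProgress => false,
		ChannelMonitorUpdateStatus::Completed => true,
	}
}

fn main() {
	// New-style call site: no `Result` to `.map(|_| ())` or route into
	// `handle_error!`. Only the send path (see the `@@ -3334` hunk) still
	// translates `false` into `APIError::MonitorUpdateInProgress`.
	if !handle_new_monitor_update(ChannelMonitorUpdateStatus::InProgress, true) {
		println!("monitor update in flight; commitment update will be resent later");
	}
}
```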