diff --git a/lightning-background-processor/src/lib.rs b/lightning-background-processor/src/lib.rs index 7d705bdcc3a..4d327ed5efe 100644 --- a/lightning-background-processor/src/lib.rs +++ b/lightning-background-processor/src/lib.rs @@ -64,8 +64,8 @@ use alloc::vec::Vec; /// * Monitoring whether the [`ChannelManager`] needs to be re-persisted to disk, and if so, /// writing it to disk/backups by invoking the callback given to it at startup. /// [`ChannelManager`] persistence should be done in the background. -/// * Calling [`ChannelManager::timer_tick_occurred`] and [`PeerManager::timer_tick_occurred`] -/// at the appropriate intervals. +/// * Calling [`ChannelManager::timer_tick_occurred`], [`ChainMonitor::rebroadcast_pending_claims`] +/// and [`PeerManager::timer_tick_occurred`] at the appropriate intervals. /// * Calling [`NetworkGraph::remove_stale_channels_and_tracking`] (if a [`GossipSync`] with a /// [`NetworkGraph`] is provided to [`BackgroundProcessor::start`]). /// @@ -116,12 +116,17 @@ const FIRST_NETWORK_PRUNE_TIMER: u64 = 60; #[cfg(test)] const FIRST_NETWORK_PRUNE_TIMER: u64 = 1; +#[cfg(not(test))] +const REBROADCAST_TIMER: u64 = 30; +#[cfg(test)] +const REBROADCAST_TIMER: u64 = 1; + #[cfg(feature = "futures")] /// core::cmp::min is not currently const, so we define a trivial (and equivalent) replacement const fn min_u64(a: u64, b: u64) -> u64 { if a < b { a } else { b } } #[cfg(feature = "futures")] const FASTEST_TIMER: u64 = min_u64(min_u64(FRESHNESS_TIMER, PING_TIMER), - min_u64(SCORER_PERSIST_TIMER, FIRST_NETWORK_PRUNE_TIMER)); + min_u64(SCORER_PERSIST_TIMER, min_u64(FIRST_NETWORK_PRUNE_TIMER, REBROADCAST_TIMER))); /// Either [`P2PGossipSync`] or [`RapidGossipSync`]. pub enum GossipSync< @@ -270,11 +275,14 @@ macro_rules! define_run_body { => { { log_trace!($logger, "Calling ChannelManager's timer_tick_occurred on startup"); $channel_manager.timer_tick_occurred(); + log_trace!($logger, "Rebroadcasting monitor's pending claims on startup"); + $chain_monitor.rebroadcast_pending_claims(); let mut last_freshness_call = $get_timer(FRESHNESS_TIMER); let mut last_ping_call = $get_timer(PING_TIMER); let mut last_prune_call = $get_timer(FIRST_NETWORK_PRUNE_TIMER); let mut last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER); + let mut last_rebroadcast_call = $get_timer(REBROADCAST_TIMER); let mut have_pruned = false; loop { @@ -372,6 +380,12 @@ macro_rules! define_run_body { } last_scorer_persist_call = $get_timer(SCORER_PERSIST_TIMER); } + + if $timer_elapsed(&mut last_rebroadcast_call, REBROADCAST_TIMER) { + log_trace!($logger, "Rebroadcasting monitor's pending claims"); + $chain_monitor.rebroadcast_pending_claims(); + last_rebroadcast_call = $get_timer(REBROADCAST_TIMER); + } } // After we exit, ensure we persist the ChannelManager one final time - this avoids @@ -1189,8 +1203,9 @@ mod tests { #[test] fn test_timer_tick_called() { - // Test that ChannelManager's and PeerManager's `timer_tick_occurred` is called every - // `FRESHNESS_TIMER`. + // Test that `ChannelManager::timer_tick_occurred` is called every `FRESHNESS_TIMER`, + // `ChainMonitor::rebroadcast_pending_claims` is called every `REBROADCAST_TIMER`, and + // `PeerManager::timer_tick_occurred` every `PING_TIMER`. 
let nodes = create_nodes(1, "test_timer_tick_called".to_string()); let data_dir = nodes[0].persister.get_data_dir(); let persister = Arc::new(Persister::new(data_dir)); @@ -1198,10 +1213,12 @@ mod tests { let bg_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].no_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone())); loop { let log_entries = nodes[0].logger.lines.lock().unwrap(); - let desired_log = "Calling ChannelManager's timer_tick_occurred".to_string(); - let second_desired_log = "Calling PeerManager's timer_tick_occurred".to_string(); - if log_entries.get(&("lightning_background_processor".to_string(), desired_log)).is_some() && - log_entries.get(&("lightning_background_processor".to_string(), second_desired_log)).is_some() { + let desired_log_1 = "Calling ChannelManager's timer_tick_occurred".to_string(); + let desired_log_2 = "Calling PeerManager's timer_tick_occurred".to_string(); + let desired_log_3 = "Rebroadcasting monitor's pending claims".to_string(); + if log_entries.get(&("lightning_background_processor".to_string(), desired_log_1)).is_some() && + log_entries.get(&("lightning_background_processor".to_string(), desired_log_2)).is_some() && + log_entries.get(&("lightning_background_processor".to_string(), desired_log_3)).is_some() { break } } diff --git a/lightning/src/chain/chainmonitor.rs b/lightning/src/chain/chainmonitor.rs index fdaa25f69b8..e7c2b0f18ec 100644 --- a/lightning/src/chain/chainmonitor.rs +++ b/lightning/src/chain/chainmonitor.rs @@ -217,8 +217,15 @@ impl Deref for LockedChannelMonitor< /// or used independently to monitor channels remotely. See the [module-level documentation] for /// details. /// +/// Note that `ChainMonitor` should regularly trigger rebroadcasts/fee bumps of pending claims from +/// a force-closed channel. This is crucial in preventing certain classes of pinning attacks, +/// detecting substantial mempool feerate changes between blocks, and ensuring reliability if +/// broadcasting fails. We recommend invoking this every 30 seconds, or lower if running in an +/// environment with spotty connections, like on mobile. +/// /// [`ChannelManager`]: crate::ln::channelmanager::ChannelManager /// [module-level documentation]: crate::chain::chainmonitor +/// [`rebroadcast_pending_claims`]: Self::rebroadcast_pending_claims pub struct ChainMonitor where C::Target: chain::Filter, T::Target: BroadcasterInterface, @@ -533,6 +540,20 @@ where C::Target: chain::Filter, pub fn get_update_future(&self) -> Future { self.event_notifier.get_future() } + + /// Triggers rebroadcasts/fee-bumps of pending claims from a force-closed channel. This is + /// crucial in preventing certain classes of pinning attacks, detecting substantial mempool + /// feerate changes between blocks, and ensuring reliability if broadcasting fails. We recommend + /// invoking this every 30 seconds, or lower if running in an environment with spotty + /// connections, like on mobile. 
+	pub fn rebroadcast_pending_claims(&self) {
+		let monitors = self.monitors.read().unwrap();
+		for (_, monitor_holder) in &*monitors {
+			monitor_holder.monitor.rebroadcast_pending_claims(
+				&*self.broadcaster, &*self.fee_estimator, &*self.logger
+			)
+		}
+	}
 }
 
 impl<ChannelSigner: WriteableEcdsaChannelSigner, C: Deref, T: Deref, F: Deref, L: Deref, P: Deref>
diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs
index 5ff297b1af7..cb2183f8509 100644
--- a/lightning/src/chain/channelmonitor.rs
+++ b/lightning/src/chain/channelmonitor.rs
@@ -1467,6 +1467,27 @@ impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitor<Signer> {
 	pub fn current_best_block(&self) -> BestBlock {
 		self.inner.lock().unwrap().best_block.clone()
 	}
+
+	/// Triggers rebroadcasts/fee-bumps of pending claims from a force-closed channel. This is
+	/// crucial in preventing certain classes of pinning attacks, detecting substantial mempool
+	/// feerate changes between blocks, and ensuring reliability if broadcasting fails. We recommend
+	/// invoking this every 30 seconds, or more frequently if running in an environment with spotty
+	/// connections, like on mobile.
+	pub fn rebroadcast_pending_claims<B: Deref, F: Deref, L: Deref>(
+		&self, broadcaster: B, fee_estimator: F, logger: L,
+	)
+	where
+		B::Target: BroadcasterInterface,
+		F::Target: FeeEstimator,
+		L::Target: Logger,
+	{
+		let fee_estimator = LowerBoundedFeeEstimator::new(fee_estimator);
+		let mut inner = self.inner.lock().unwrap();
+		let current_height = inner.best_block.height();
+		inner.onchain_tx_handler.rebroadcast_pending_claims(
+			current_height, &broadcaster, &fee_estimator, &logger,
+		);
+	}
 }
 
 impl<Signer: WriteableEcdsaChannelSigner> ChannelMonitorImpl<Signer> {
diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs
index cd0cb08eab3..04776fbb089 100644
--- a/lightning/src/chain/onchaintx.rs
+++ b/lightning/src/chain/onchaintx.rs
@@ -481,6 +481,59 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
 		events.into_iter().map(|(_, event)| event).collect()
 	}
 
+	/// Triggers rebroadcasts/fee-bumps of pending claims from a force-closed channel. This is
+	/// crucial in preventing certain classes of pinning attacks, detecting substantial mempool
+	/// feerate changes between blocks, and ensuring reliability if broadcasting fails. We recommend
+	/// invoking this every 30 seconds, or more frequently if running in an environment with spotty
+	/// connections, like on mobile.
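+	///
+	/// Note that this intentionally calls `generate_claim` with `force_feerate_bump` set to
+	/// `false` below: each claim is re-evaluated against the current fee estimates and is only
+	/// RBF-bumped if those estimates have risen; otherwise the previous transaction is simply
+	/// rebroadcast (or, for anchor channels, the previous claim event is replayed).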
+	pub(crate) fn rebroadcast_pending_claims<B: Deref, F: Deref, L: Deref>(
+		&mut self, current_height: u32, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator<F>,
+		logger: &L,
+	)
+	where
+		B::Target: BroadcasterInterface,
+		F::Target: FeeEstimator,
+		L::Target: Logger,
+	{
+		let mut bump_requests = Vec::with_capacity(self.pending_claim_requests.len());
+		for (package_id, request) in self.pending_claim_requests.iter() {
+			let inputs = request.outpoints();
+			log_info!(logger, "Triggering rebroadcast/fee-bump for request with inputs {:?}", inputs);
+			bump_requests.push((*package_id, request.clone()));
+		}
+		for (package_id, request) in bump_requests {
+			self.generate_claim(current_height, &request, false /* force_feerate_bump */, fee_estimator, logger)
+				.map(|(_, new_feerate, claim)| {
+					let mut bumped_feerate = false;
+					if let Some(mut_request) = self.pending_claim_requests.get_mut(&package_id) {
+						bumped_feerate = new_feerate > request.previous_feerate();
+						mut_request.set_feerate(new_feerate);
+					}
+					match claim {
+						OnchainClaim::Tx(tx) => {
+							let log_start = if bumped_feerate { "Broadcasting RBF-bumped" } else { "Rebroadcasting" };
+							log_info!(logger, "{} onchain {}", log_start, log_tx!(tx));
+							broadcaster.broadcast_transaction(&tx);
+						},
+						#[cfg(anchors)]
+						OnchainClaim::Event(event) => {
+							let log_start = if bumped_feerate { "Yielding fee-bumped" } else { "Replaying" };
+							log_info!(logger, "{} onchain event to spend inputs {:?}", log_start,
+								request.outpoints());
+							#[cfg(debug_assertions)] {
+								debug_assert!(request.requires_external_funding());
+								let num_existing = self.pending_claim_events.iter()
+									.filter(|entry| entry.0 == package_id).count();
+								assert!(num_existing == 0 || num_existing == 1);
+							}
+							self.pending_claim_events.retain(|event| event.0 != package_id);
+							self.pending_claim_events.push((package_id, event));
+						}
+					}
+				});
+		}
+	}
+
 	/// Lightning security model (i.e being able to redeem/timeout HTLC or penalize counterparty
 	/// onchain) lays on the assumption of claim transactions getting confirmed before timelock
 	/// expiration (CSV or CLTV following cases). In case of high-fee spikes, claim tx may get stuck
@@ -489,9 +542,13 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
 	///
 	/// Panics if there are signing errors, because signing operations in reaction to on-chain
 	/// events are not expected to fail, and if they do, we may lose funds.
-	fn generate_claim<F: Deref, L: Deref>(&mut self, cur_height: u32, cached_request: &PackageTemplate, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L) -> Option<(u32, u64, OnchainClaim)>
-		where F::Target: FeeEstimator,
-		      L::Target: Logger,
+	fn generate_claim<F: Deref, L: Deref>(
+		&mut self, cur_height: u32, cached_request: &PackageTemplate, force_feerate_bump: bool,
+		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L,
+	) -> Option<(u32, u64, OnchainClaim)>
+	where
+		F::Target: FeeEstimator,
+		L::Target: Logger,
 	{
 		let request_outpoints = cached_request.outpoints();
 		if request_outpoints.is_empty() {
@@ -538,8 +595,9 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
 		#[cfg(anchors)]
 		{ // Attributes are not allowed on if expressions on our current MSRV of 1.41.
 			if cached_request.requires_external_funding() {
-				let target_feerate_sat_per_1000_weight = cached_request
-					.compute_package_feerate(fee_estimator, ConfirmationTarget::HighPriority);
+				let target_feerate_sat_per_1000_weight = cached_request.compute_package_feerate(
+					fee_estimator, ConfirmationTarget::HighPriority, force_feerate_bump
+				);
 				if let Some(htlcs) = cached_request.construct_malleable_package_with_external_funding(self) {
 					return Some((
 						new_timer,
@@ -558,7 +616,8 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
 		let predicted_weight = cached_request.package_weight(&self.destination_script);
 		if let Some((output_value, new_feerate)) = cached_request.compute_package_output(
-			predicted_weight, self.destination_script.dust_value().to_sat(), fee_estimator, logger,
+			predicted_weight, self.destination_script.dust_value().to_sat(),
+			force_feerate_bump, fee_estimator, logger,
 		) {
 			assert!(new_feerate != 0);
@@ -601,7 +660,7 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
 			// counterparty's latest commitment don't have any HTLCs present.
 			let conf_target = ConfirmationTarget::HighPriority;
 			let package_target_feerate_sat_per_1000_weight = cached_request
-				.compute_package_feerate(fee_estimator, conf_target);
+				.compute_package_feerate(fee_estimator, conf_target, force_feerate_bump);
 			Some((
 				new_timer,
 				package_target_feerate_sat_per_1000_weight as u64,
@@ -700,7 +759,9 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
 		// Generate claim transactions and track them to bump if necessary at
 		// height timer expiration (i.e in how many blocks we're going to take action).
 		for mut req in preprocessed_requests {
-			if let Some((new_timer, new_feerate, claim)) = self.generate_claim(cur_height, &req, &*fee_estimator, &*logger) {
+			if let Some((new_timer, new_feerate, claim)) = self.generate_claim(
+				cur_height, &req, true /* force_feerate_bump */, &*fee_estimator, &*logger,
+			) {
 				req.set_timer(new_timer);
 				req.set_feerate(new_feerate);
 				let package_id = match claim {
@@ -893,7 +954,9 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
 			// Build, bump and rebroadcast tx accordingly
 			log_trace!(logger, "Bumping {} candidates", bump_candidates.len());
 			for (package_id, request) in bump_candidates.iter() {
-				if let Some((new_timer, new_feerate, bump_claim)) = self.generate_claim(cur_height, &request, &*fee_estimator, &*logger) {
+				if let Some((new_timer, new_feerate, bump_claim)) = self.generate_claim(
+					cur_height, &request, true /* force_feerate_bump */, &*fee_estimator, &*logger,
+				) {
 					match bump_claim {
 						OnchainClaim::Tx(bump_tx) => {
 							log_info!(logger, "Broadcasting RBF-bumped onchain {}", log_tx!(bump_tx));
@@ -973,7 +1036,9 @@ impl<ChannelSigner: WriteableEcdsaChannelSigner> OnchainTxHandler<ChannelSigner>
 			}
 		}
 		for ((_package_id, _), ref mut request) in bump_candidates.iter_mut() {
-			if let Some((new_timer, new_feerate, bump_claim)) = self.generate_claim(height, &request, fee_estimator, &&*logger) {
+			if let Some((new_timer, new_feerate, bump_claim)) = self.generate_claim(
+				height, &request, true /* force_feerate_bump */, fee_estimator, &&*logger
+			) {
 				request.set_timer(new_timer);
 				request.set_feerate(new_feerate);
 				match bump_claim {
diff --git a/lightning/src/chain/package.rs b/lightning/src/chain/package.rs
index 786451ee50c..b3f6f50ed1d 100644
--- a/lightning/src/chain/package.rs
+++ b/lightning/src/chain/package.rs
@@ -554,6 +554,9 @@ impl PackageTemplate {
 	pub(crate) fn aggregable(&self) -> bool {
 		self.aggregable
 	}
+	pub(crate) fn previous_feerate(&self) -> u64 {
+		self.feerate_previous
+	}
 	pub(crate) fn set_feerate(&mut self, new_feerate: u64) {
 		self.feerate_previous = new_feerate;
 	}
@@ -762,16 +765,23 @@ impl PackageTemplate {
 	/// Returns value in satoshis to be included as package outgoing output amount and feerate
 	/// which was used to generate the value. Will not return less than `dust_limit_sats` for the
 	/// value.
-	pub(crate) fn compute_package_output<F: Deref, L: Deref>(&self, predicted_weight: usize, dust_limit_sats: u64, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L) -> Option<(u64, u64)>
-		where F::Target: FeeEstimator,
-		      L::Target: Logger,
+	pub(crate) fn compute_package_output<F: Deref, L: Deref>(
+		&self, predicted_weight: usize, dust_limit_sats: u64, force_feerate_bump: bool,
+		fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L,
+	) -> Option<(u64, u64)>
+	where
+		F::Target: FeeEstimator,
+		L::Target: Logger,
 	{
 		debug_assert!(self.malleability == PackageMalleability::Malleable, "The package output is fixed for non-malleable packages");
 		let input_amounts = self.package_amount();
 		assert!(dust_limit_sats as i64 > 0, "Output script must be broadcastable/have a 'real' dust limit.");
 		// If old feerate is 0, first iteration of this claim, use normal fee calculation
 		if self.feerate_previous != 0 {
-			if let Some((new_fee, feerate)) = feerate_bump(predicted_weight, input_amounts, self.feerate_previous, fee_estimator, logger) {
+			if let Some((new_fee, feerate)) = feerate_bump(
+				predicted_weight, input_amounts, self.feerate_previous, force_feerate_bump,
+				fee_estimator, logger,
+			) {
 				return Some((cmp::max(input_amounts as i64 - new_fee as i64, dust_limit_sats as i64) as u64, feerate));
 			}
 		} else {
@@ -784,16 +794,19 @@ impl PackageTemplate {
 	#[cfg(anchors)]
 	/// Computes a feerate based on the given confirmation target. If a previous feerate was used,
-	/// and the new feerate is below it, we'll use a 25% increase of the previous feerate instead of
-	/// the new one.
+	/// the new feerate is below it, and `force_feerate_bump` is set, we'll use a 25% increase of
+	/// the previous feerate instead of the new feerate.
 	pub(crate) fn compute_package_feerate<F: Deref>(
 		&self, fee_estimator: &LowerBoundedFeeEstimator<F>, conf_target: ConfirmationTarget,
+		force_feerate_bump: bool,
 	) -> u32 where F::Target: FeeEstimator {
 		let feerate_estimate = fee_estimator.bounded_sat_per_1000_weight(conf_target);
 		if self.feerate_previous != 0 {
 			// If old feerate inferior to actual one given back by Fee Estimator, use it to compute new fee...
 			if feerate_estimate as u64 > self.feerate_previous {
 				feerate_estimate
+			} else if !force_feerate_bump {
+				self.feerate_previous.try_into().unwrap_or(u32::max_value())
 			} else {
 				// ...else just increase the previous feerate by 25% (because that's a nice number)
 				(self.feerate_previous + (self.feerate_previous / 4)).try_into().unwrap_or(u32::max_value())
@@ -945,32 +958,47 @@ fn compute_fee_from_spent_amounts<F: Deref, L: Deref>(input_amounts: u64, predicted_weight: usize, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L) -> Option<(u64, u64)>
 /// Attempt to propose a bumping fee for a transaction from its spent output's values and predicted
 /// weight. If feerates proposed by the fee-estimator have been increasing since last fee-bumping
-/// attempt, use them. Otherwise, blindly bump the feerate by 25% of the previous feerate. We also
-/// verify that those bumping heuristics respect BIP125 rules 3) and 4) and if required adjust
-/// the new fee to meet the RBF policy requirement.
-fn feerate_bump<F: Deref, L: Deref>(predicted_weight: usize, input_amounts: u64, previous_feerate: u64, fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L) -> Option<(u64, u64)>
-	where F::Target: FeeEstimator,
-	      L::Target: Logger,
+/// attempt, use them. If `force_feerate_bump` is set, we bump the feerate by 25% of the previous
+/// feerate, or just use the previous feerate otherwise. If a feerate bump did happen, we also
+/// verify that those bumping heuristics respect BIP125 rules 3) and 4) and if required adjust the
+/// new fee to meet the RBF policy requirement.
+fn feerate_bump<F: Deref, L: Deref>(
+	predicted_weight: usize, input_amounts: u64, previous_feerate: u64, force_feerate_bump: bool,
+	fee_estimator: &LowerBoundedFeeEstimator<F>, logger: &L,
+) -> Option<(u64, u64)>
+where
+	F::Target: FeeEstimator,
+	L::Target: Logger,
 {
 	// If old feerate inferior to actual one given back by Fee Estimator, use it to compute new fee...
-	let new_fee = if let Some((new_fee, _)) = compute_fee_from_spent_amounts(input_amounts, predicted_weight, fee_estimator, logger) {
-		let updated_feerate = new_fee / (predicted_weight as u64 * 1000);
-		if updated_feerate > previous_feerate {
-			new_fee
+	let (new_fee, new_feerate) = if let Some((new_fee, new_feerate)) = compute_fee_from_spent_amounts(input_amounts, predicted_weight, fee_estimator, logger) {
+		if new_feerate > previous_feerate {
+			(new_fee, new_feerate)
+		} else if !force_feerate_bump {
+			let previous_fee = previous_feerate * (predicted_weight as u64) / 1000;
+			(previous_fee, previous_feerate)
 		} else {
 			// ...else just increase the previous feerate by 25% (because that's a nice number)
-			let new_fee = previous_feerate * (predicted_weight as u64) / 750;
-			if input_amounts <= new_fee {
+			let bumped_feerate = previous_feerate + (previous_feerate / 4);
+			let bumped_fee = bumped_feerate * (predicted_weight as u64) / 1000;
+			if input_amounts <= bumped_fee {
 				log_warn!(logger, "Can't 25% bump new claiming tx, amount {} is too small", input_amounts);
 				return None;
 			}
-			new_fee
+			(bumped_fee, bumped_feerate)
 		}
 	} else {
 		log_warn!(logger, "Can't new-estimation bump new claiming tx, amount {} is too small", input_amounts);
 		return None;
 	};
 
+	// Our feerate should never decrease. If it hasn't changed, though, we just need to
+	// rebroadcast/re-sign the previous claim.
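+	// For intuition, with illustrative numbers: a forced 25% bump of a previous feerate of
+	// 1,000 sat/KW targets 1,250 sat/KW; at a predicted weight of 800 that is a fee of
+	// 1,250 * 800 / 1,000 = 1,000 sats, before the BIP125 rule 3) and 4) adjustments below.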
+ debug_assert!(new_feerate >= previous_feerate); + if new_feerate == previous_feerate { + return Some((new_fee, new_feerate)); + } + let previous_fee = previous_feerate * (predicted_weight as u64) / 1000; let min_relay_fee = MIN_RELAY_FEE_SAT_PER_1000_WEIGHT * (predicted_weight as u64) / 1000; // BIP 125 Opt-in Full Replace-by-Fee Signaling diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index 6d96877625c..b695e20a8dd 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -151,6 +151,20 @@ impl ConnectStyle { } } + pub fn updates_best_block_first(&self) -> bool { + match self { + ConnectStyle::BestBlockFirst => true, + ConnectStyle::BestBlockFirstSkippingBlocks => true, + ConnectStyle::BestBlockFirstReorgsOnlyTip => true, + ConnectStyle::TransactionsFirst => false, + ConnectStyle::TransactionsFirstSkippingBlocks => false, + ConnectStyle::TransactionsDuplicativelyFirstSkippingBlocks => false, + ConnectStyle::HighlyRedundantTransactionsFirstSkippingBlocks => false, + ConnectStyle::TransactionsFirstReorgsOnlyTip => false, + ConnectStyle::FullBlockViaListen => false, + } + } + fn random_style() -> ConnectStyle { #[cfg(feature = "std")] { use core::hash::{BuildHasher, Hasher}; diff --git a/lightning/src/ln/functional_tests.rs b/lightning/src/ln/functional_tests.rs index 401667f31f6..17f8e1597b7 100644 --- a/lightning/src/ln/functional_tests.rs +++ b/lightning/src/ln/functional_tests.rs @@ -2991,9 +2991,9 @@ fn do_test_htlc_on_chain_timeout(connect_style: ConnectStyle) { if nodes[1].connect_style.borrow().skips_blocks() { assert_eq!(txn.len(), 1); } else { - assert_eq!(txn.len(), 2); // Extra rebroadcast of timeout transaction + assert_eq!(txn.len(), 3); // Two extra fee bumps for timeout transaction } - check_spends!(txn[0], commitment_tx[0]); + txn.iter().for_each(|tx| check_spends!(tx, commitment_tx[0])); assert_eq!(txn[0].clone().input[0].witness.last().unwrap().len(), ACCEPTED_HTLC_SCRIPT_WEIGHT); txn.remove(0) }; @@ -7510,7 +7510,7 @@ fn test_bump_penalty_txn_on_remote_commitment() { assert_ne!(feerate_preimage, 0); // After exhaustion of height timer, new bumped claim txn should have been broadcast, check it - connect_blocks(&nodes[1], 15); + connect_blocks(&nodes[1], 1); { let mut node_txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap(); assert_eq!(node_txn.len(), 1); diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index d09f4229c8c..5b2b29dbb76 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -1778,6 +1778,157 @@ fn test_restored_packages_retry() { do_test_restored_packages_retry(true); } +fn do_test_monitor_rebroadcast_pending_claims(anchors: bool) { + // Test that we will retry broadcasting pending claims for a force-closed channel on every + // `ChainMonitor::rebroadcast_pending_claims` call. 
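+	// Broadly: we force-close a channel with a pending HTLC, let the HTLC claim become
+	// broadcastable, then alternate between connecting blocks, calling
+	// `rebroadcast_pending_claims`, and raising the fee estimate, checking at each step whether a
+	// retry and/or a feerate bump is expected (note that pre-anchors HTLC transactions have a
+	// fixed, pre-signed fee and so can never be bumped).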
+	if anchors {
+		assert!(cfg!(anchors));
+	}
+	let secp = Secp256k1::new();
+	let mut chanmon_cfgs = create_chanmon_cfgs(2);
+	let node_cfgs = create_node_cfgs(2, &chanmon_cfgs);
+	let mut config = test_default_channel_config();
+	if anchors {
+		#[cfg(anchors)] {
+			config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true;
+		}
+	}
+	let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(config), Some(config)]);
+	let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
+
+	let (_, _, _, chan_id, funding_tx) = create_chan_between_nodes_with_value(
+		&nodes[0], &nodes[1], 1_000_000, 500_000_000
+	);
+	const HTLC_AMT_MSAT: u64 = 1_000_000;
+	const HTLC_AMT_SAT: u64 = HTLC_AMT_MSAT / 1000;
+	route_payment(&nodes[0], &[&nodes[1]], HTLC_AMT_MSAT);
+
+	let htlc_expiry = nodes[0].best_block_info().1 + TEST_FINAL_CLTV + 1;
+
+	let commitment_txn = get_local_commitment_txn!(&nodes[0], &chan_id);
+	assert_eq!(commitment_txn.len(), if anchors { 1 /* commitment tx only */ } else { 2 /* commitment and htlc timeout tx */ });
+	check_spends!(&commitment_txn[0], &funding_tx);
+	mine_transaction(&nodes[0], &commitment_txn[0]);
+	check_closed_broadcast!(&nodes[0], true);
+	check_closed_event(&nodes[0], 1, ClosureReason::CommitmentTxConfirmed, false);
+	check_added_monitors(&nodes[0], 1);
+
+	// Set up a helper closure we'll use throughout our test. We should only expect retries without
+	// bumps if fees have not increased after a block has been connected (assuming the height timer
+	// re-evaluates at every block) or after `ChainMonitor::rebroadcast_pending_claims` is called.
+	let mut prev_htlc_tx_feerate = None;
+	let mut check_htlc_retry = |should_retry: bool, should_bump: bool| -> Option<Transaction> {
+		let (htlc_tx, htlc_tx_feerate) = if anchors {
+			assert!(nodes[0].tx_broadcaster.txn_broadcast().is_empty());
+			let mut events = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events();
+			assert_eq!(events.len(), if should_retry { 1 } else { 0 });
+			if !should_retry {
+				return None;
+			}
+			#[allow(unused_assignments)]
+			let mut tx = Transaction {
+				version: 2,
+				lock_time: bitcoin::PackedLockTime::ZERO,
+				input: vec![],
+				output: vec![],
+			};
+			#[allow(unused_assignments)]
+			let mut feerate = 0;
+			#[cfg(anchors)] {
+				feerate = if let Event::BumpTransaction(BumpTransactionEvent::HTLCResolution {
+					target_feerate_sat_per_1000_weight, mut htlc_descriptors, tx_lock_time,
+				}) = events.pop().unwrap() {
+					assert_eq!(htlc_descriptors.len(), 1);
+					let descriptor = htlc_descriptors.pop().unwrap();
+					assert_eq!(descriptor.commitment_txid, commitment_txn[0].txid());
+					let htlc_output_idx = descriptor.htlc.transaction_output_index.unwrap() as usize;
+					assert!(htlc_output_idx < commitment_txn[0].output.len());
+					tx.lock_time = tx_lock_time;
+					// Note that we don't care about actually making the HTLC transaction meet the
+					// feerate for the test, we just want to make sure the feerates we receive from
+					// the events never decrease.
+					tx.input.push(descriptor.unsigned_tx_input());
+					let signer = nodes[0].keys_manager.derive_channel_keys(
+						descriptor.channel_value_satoshis, &descriptor.channel_keys_id,
+					);
+					let per_commitment_point = signer.get_per_commitment_point(
+						descriptor.per_commitment_number, &secp
+					);
+					tx.output.push(descriptor.tx_output(&per_commitment_point, &secp));
+					let our_sig = signer.sign_holder_htlc_transaction(&mut tx, 0, &descriptor, &secp).unwrap();
+					let witness_script = descriptor.witness_script(&per_commitment_point, &secp);
+					tx.input[0].witness = descriptor.tx_input_witness(&our_sig, &witness_script);
+					target_feerate_sat_per_1000_weight as u64
+				} else { panic!("unexpected event"); };
+			}
+			(tx, feerate)
+		} else {
+			assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty());
+			let mut txn = nodes[0].tx_broadcaster.txn_broadcast();
+			assert_eq!(txn.len(), if should_retry { 1 } else { 0 });
+			if !should_retry {
+				return None;
+			}
+			let htlc_tx = txn.pop().unwrap();
+			check_spends!(htlc_tx, commitment_txn[0]);
+			let htlc_tx_fee = HTLC_AMT_SAT - htlc_tx.output[0].value;
+			let htlc_tx_feerate = htlc_tx_fee * 1000 / htlc_tx.weight() as u64;
+			(htlc_tx, htlc_tx_feerate)
+		};
+		if should_bump {
+			assert!(htlc_tx_feerate > prev_htlc_tx_feerate.take().unwrap());
+		} else if let Some(prev_feerate) = prev_htlc_tx_feerate.take() {
+			assert_eq!(htlc_tx_feerate, prev_feerate);
+		}
+		prev_htlc_tx_feerate = Some(htlc_tx_feerate);
+		Some(htlc_tx)
+	};
+
+	// Connect blocks up to one before the HTLC expires. This should not result in a claim/retry.
+	connect_blocks(&nodes[0], htlc_expiry - nodes[0].best_block_info().1 - 2);
+	check_htlc_retry(false, false);
+
+	// Connect one more block, producing our first claim.
+	connect_blocks(&nodes[0], 1);
+	check_htlc_retry(true, false);
+
+	// Connect one more block, expecting a retry with a fee bump. Unfortunately, we cannot bump HTLC
+	// transactions pre-anchors.
+	connect_blocks(&nodes[0], 1);
+	check_htlc_retry(true, anchors);
+
+	// Trigger a call and we should have another retry, but without a bump.
+	nodes[0].chain_monitor.chain_monitor.rebroadcast_pending_claims();
+	check_htlc_retry(true, false);
+
+	// Double the feerate and trigger a call, expecting a fee-bumped retry.
+	*nodes[0].fee_estimator.sat_per_kw.lock().unwrap() *= 2;
+	nodes[0].chain_monitor.chain_monitor.rebroadcast_pending_claims();
+	check_htlc_retry(true, anchors);
+
+	// Connect one more block, expecting a retry with a fee bump. Unfortunately, we cannot bump HTLC
+	// transactions pre-anchors.
+	connect_blocks(&nodes[0], 1);
+	let htlc_tx = check_htlc_retry(true, anchors).unwrap();
+
+	// Mine the HTLC transaction to ensure we don't retry claims while they're confirmed.
+	mine_transaction(&nodes[0], &htlc_tx);
+	// If we have a `ConnectStyle` that advertises the new block first without the transactions,
+	// we'll receive an extra bumped claim.
+	if nodes[0].connect_style.borrow().updates_best_block_first() {
+		check_htlc_retry(true, anchors);
+	}
+	nodes[0].chain_monitor.chain_monitor.rebroadcast_pending_claims();
+	check_htlc_retry(false, false);
+}
+
+#[test]
+fn test_monitor_timer_based_claim() {
+	do_test_monitor_rebroadcast_pending_claims(false);
+	#[cfg(anchors)]
+	do_test_monitor_rebroadcast_pending_claims(true);
+}
+
 #[cfg(anchors)]
 #[test]
 fn test_yield_anchors_events() {