From cfa8941cc59fc164bf95d7a698927dce0bcc4607 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Tue, 14 Feb 2023 15:35:40 -0800 Subject: [PATCH 1/9] Implement PartialEq manually Since we don't store `pending_claim_events` within `OnchainTxHandler`, as they'll be regenerated on restarts, we opt to implement `PartialEq` manually such that the field is no longer considered. --- lightning/src/chain/onchaintx.rs | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index d3ca02ca7a5..719d8314cbc 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -215,7 +215,6 @@ type PackageID = [u8; 32]; /// OnchainTxHandler receives claiming requests, aggregates them if it's sound, broadcast and /// do RBF bumping if possible. -#[derive(PartialEq)] pub struct OnchainTxHandler { destination_script: Script, holder_commitment: HolderCommitmentTransaction, @@ -265,6 +264,22 @@ pub struct OnchainTxHandler { pub(super) secp_ctx: Secp256k1, } +impl PartialEq for OnchainTxHandler { + fn eq(&self, other: &Self) -> bool { + // `signer`, `secp_ctx`, and `pending_claim_events` are excluded on purpose. + self.destination_script == other.destination_script && + self.holder_commitment == other.holder_commitment && + self.holder_htlc_sigs == other.holder_htlc_sigs && + self.prev_holder_commitment == other.prev_holder_commitment && + self.prev_holder_htlc_sigs == other.prev_holder_htlc_sigs && + self.channel_transaction_parameters == other.channel_transaction_parameters && + self.pending_claim_requests == other.pending_claim_requests && + self.claimable_outpoints == other.claimable_outpoints && + self.locktimed_packages == other.locktimed_packages && + self.onchain_events_awaiting_threshold_conf == other.onchain_events_awaiting_threshold_conf + } +} + const SERIALIZATION_VERSION: u8 = 1; const MIN_SERIALIZATION_VERSION: u8 = 1; From e7fb47b05ac30dd3772a059e79d56e2a4f6969e1 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Tue, 14 Feb 2023 15:36:37 -0800 Subject: [PATCH 2/9] Derive Eq on structs behind anchors build flag --- lightning/src/ln/chan_utils.rs | 4 ++-- lightning/src/util/events.rs | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/lightning/src/ln/chan_utils.rs b/lightning/src/ln/chan_utils.rs index 378b4d794ff..8c636203ec6 100644 --- a/lightning/src/ln/chan_utils.rs +++ b/lightning/src/ln/chan_utils.rs @@ -811,7 +811,7 @@ pub fn build_anchor_input_witness(funding_key: &PublicKey, funding_sig: &Signatu /// /// Normally, this is converted to the broadcaster/countersignatory-organized DirectedChannelTransactionParameters /// before use, via the as_holder_broadcastable and as_counterparty_broadcastable functions. -#[derive(Clone, Debug, PartialEq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub struct ChannelTransactionParameters { /// Holder public keys pub holder_pubkeys: ChannelPublicKeys, @@ -835,7 +835,7 @@ pub struct ChannelTransactionParameters { } /// Late-bound per-channel counterparty data used to build transactions.
-#[derive(Clone, Debug, PartialEq)] +#[derive(Clone, Debug, PartialEq, Eq)] pub struct CounterpartyChannelTransactionParameters { /// Counter-party public keys pub pubkeys: ChannelPublicKeys, diff --git a/lightning/src/util/events.rs b/lightning/src/util/events.rs index 8c981fd1c48..75f2afbd233 100644 --- a/lightning/src/util/events.rs +++ b/lightning/src/util/events.rs @@ -256,7 +256,7 @@ impl_writeable_tlv_based_enum_upgradable!(HTLCDestination, #[cfg(anchors)] /// A descriptor used to sign for a commitment transaction's anchor output. -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq, Eq)] pub struct AnchorDescriptor { /// A unique identifier used along with `channel_value_satoshis` to re-derive the /// [`InMemorySigner`] required to sign `input`. @@ -276,7 +276,7 @@ pub struct AnchorDescriptor { #[cfg(anchors)] /// A descriptor used to sign for a commitment transaction's HTLC output. -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq, Eq)] pub struct HTLCDescriptor { /// A unique identifier used along with `channel_value_satoshis` to re-derive the /// [`InMemorySigner`] required to sign `input`. @@ -369,7 +369,7 @@ impl HTLCDescriptor { #[cfg(anchors)] /// Represents the different types of transactions, originating from LDK, to be bumped. -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq, Eq)] pub enum BumpTransactionEvent { /// Indicates that a channel featuring anchor outputs is to be closed by broadcasting the local /// commitment transaction. Since commitment transactions have a static feerate pre-agreed upon, From 19586267448c4767e3dd1f7642ba3e7d0e5d08ce Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Tue, 14 Feb 2023 15:38:20 -0800 Subject: [PATCH 3/9] Fix stale import behind anchors build tag --- lightning/src/ln/monitor_tests.rs | 2 +- lightning/src/util/events.rs | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index c4435b470ca..af33ca7b737 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -10,7 +10,7 @@ //! Further functional tests which test blockchain reorganizations. #[cfg(anchors)] -use crate::chain::keysinterface::BaseSign; +use crate::chain::keysinterface::{ChannelSigner, EcdsaChannelSigner}; #[cfg(anchors)] use crate::chain::channelmonitor::LATENCY_GRACE_PERIOD_BLOCKS; use crate::chain::channelmonitor::{ANTI_REORG_DELAY, Balance}; diff --git a/lightning/src/util/events.rs b/lightning/src/util/events.rs index 75f2afbd233..754283dfb8c 100644 --- a/lightning/src/util/events.rs +++ b/lightning/src/util/events.rs @@ -290,10 +290,10 @@ pub struct HTLCDescriptor { /// [`InMemorySigner`]: crate::chain::keysinterface::InMemorySigner pub channel_value_satoshis: u64, /// The necessary channel parameters that need to be provided to the re-derived - /// [`InMemorySigner`] through [`BaseSign::provide_channel_parameters`]. + /// [`InMemorySigner`] through [`ChannelSigner::provide_channel_parameters`]. /// /// [`InMemorySigner`]: crate::chain::keysinterface::InMemorySigner - /// [`BaseSign::provide_channel_parameters`]: crate::chain::keysinterface::BaseSign::provide_channel_parameters + /// [`ChannelSigner::provide_channel_parameters`]: crate::chain::keysinterface::ChannelSigner::provide_channel_parameters pub channel_parameters: ChannelTransactionParameters, /// The txid of the commitment transaction in which the HTLC output lives. 
pub commitment_txid: Txid, @@ -387,7 +387,7 @@ pub enum BumpTransactionEvent { /// child anchor transaction. To sign its anchor input, an [`InMemorySigner`] should be /// re-derived through [`KeysManager::derive_channel_keys`] with the help of /// [`AnchorDescriptor::channel_keys_id`] and [`AnchorDescriptor::channel_value_satoshis`]. The - /// anchor input signature can be computed with [`BaseSign::sign_holder_anchor_input`], + /// anchor input signature can be computed with [`EcdsaChannelSigner::sign_holder_anchor_input`], /// which can then be provided to [`build_anchor_input_witness`] along with the `funding_pubkey` /// to obtain the full witness required to spend. /// @@ -410,7 +410,7 @@ pub enum BumpTransactionEvent { /// /// [`InMemorySigner`]: crate::chain::keysinterface::InMemorySigner /// [`KeysManager::derive_channel_keys`]: crate::chain::keysinterface::KeysManager::derive_channel_keys - /// [`BaseSign::sign_holder_anchor_input`]: crate::chain::keysinterface::BaseSign::sign_holder_anchor_input + /// [`EcdsaChannelSigner::sign_holder_anchor_input`]: crate::chain::keysinterface::EcdsaChannelSigner::sign_holder_anchor_input /// [`build_anchor_input_witness`]: crate::ln::chan_utils::build_anchor_input_witness ChannelClose { /// The target feerate that the transaction package, which consists of the commitment @@ -444,7 +444,7 @@ pub enum BumpTransactionEvent { /// HTLC transaction. To sign HTLC inputs, an [`InMemorySigner`] should be re-derived through /// [`KeysManager::derive_channel_keys`] with the help of `channel_keys_id` and /// `channel_value_satoshis`. Each HTLC input's signature can be computed with - /// [`BaseSign::sign_holder_htlc_transaction`], which can then be provided to + /// [`EcdsaChannelSigner::sign_holder_htlc_transaction`], which can then be provided to /// [`HTLCDescriptor::tx_input_witness`] to obtain the fully signed witness required to spend. /// /// It is possible to receive more than one instance of this event if a valid HTLC transaction @@ -459,7 +459,7 @@ pub enum BumpTransactionEvent { /// /// [`InMemorySigner`]: crate::chain::keysinterface::InMemorySigner /// [`KeysManager::derive_channel_keys`]: crate::chain::keysinterface::KeysManager::derive_channel_keys - /// [`BaseSign::sign_holder_htlc_transaction`]: crate::chain::keysinterface::BaseSign::sign_holder_htlc_transaction + /// [`EcdsaChannelSigner::sign_holder_htlc_transaction`]: crate::chain::keysinterface::EcdsaChannelSigner::sign_holder_htlc_transaction /// [`HTLCDescriptor::tx_input_witness`]: HTLCDescriptor::tx_input_witness HTLCResolution { target_feerate_sat_per_1000_weight: u32, From 7c446b48921d873728c5e22cc18a2fcfb6fe79c9 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Tue, 14 Feb 2023 16:14:03 -0800 Subject: [PATCH 4/9] Add missing docs for BumpHTLCResolution --- lightning/src/util/events.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lightning/src/util/events.rs b/lightning/src/util/events.rs index 754283dfb8c..b64022026e3 100644 --- a/lightning/src/util/events.rs +++ b/lightning/src/util/events.rs @@ -462,7 +462,10 @@ pub enum BumpTransactionEvent { /// [`EcdsaChannelSigner::sign_holder_htlc_transaction`]: crate::chain::keysinterface::EcdsaChannelSigner::sign_holder_htlc_transaction /// [`HTLCDescriptor::tx_input_witness`]: HTLCDescriptor::tx_input_witness HTLCResolution { + /// The target feerate that the resulting HTLC transaction must meet. 
target_feerate_sat_per_1000_weight: u32, + /// The set of pending HTLCs on the confirmed commitment that need to be claimed, preferably + /// by the same transaction. htlc_descriptors: Vec, }, } From 1638c8b34a727bc51362e6129c15af7098e90be4 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Tue, 14 Feb 2023 16:24:30 -0800 Subject: [PATCH 5/9] Clarify OnchainEvent::Claim behavior The previous documentation was slightly incorrect: a `Claim` can also be from the counterparty if they happened to claim the same exact set of outputs as a claiming transaction we generated. --- lightning/src/chain/onchaintx.rs | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index 719d8314cbc..6cb59b590aa 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -72,18 +72,23 @@ impl OnchainEventEntry { } } -/// Upon discovering of some classes of onchain tx by ChannelMonitor, we may have to take actions on it -/// once they mature to enough confirmations (ANTI_REORG_DELAY) +/// Events for claims the [`OnchainTxHandler`] has generated. Once the events are considered safe +/// from a chain reorg, the [`OnchainTxHandler`] will act accordingly. #[derive(PartialEq, Eq)] enum OnchainEvent { - /// Outpoint under claim process by our own tx, once this one get enough confirmations, we remove it from - /// bump-txn candidate buffer. + /// A pending request has been claimed by a transaction spending the exact same set of outpoints + /// as the request. This claim can either be ours or from the counterparty. Once the claiming + /// transaction has met [`ANTI_REORG_DELAY`] confirmations, we consider it final and remove the + /// pending request. Claim { package_id: PackageID, }, - /// Claim tx aggregate multiple claimable outpoints. One of the outpoint may be claimed by a counterparty party tx. - /// In this case, we need to drop the outpoint and regenerate a new claim tx. By safety, we keep tracking - /// the outpoint to be sure to resurect it back to the claim tx if reorgs happen. + /// The counterparty has claimed an outpoint from one of our pending requests through a + /// different transaction than ours. If our transaction was attempting to claim multiple + /// outputs, we need to drop the outpoint claimed by the counterparty and regenerate a new claim + /// transaction for ourselves. We keep tracking, separately, the outpoint claimed by the + /// counterparty up to [`ANTI_REORG_DELAY`] confirmations to ensure we attempt to re-claim it + /// if the counterparty's claim is reorged from the chain. ContentiousOutpoint { package: PackageTemplate, } } From 7b9c28a02113fe1f36d2b4d332f52fa898f8ba2f Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Tue, 14 Feb 2023 15:48:44 -0800 Subject: [PATCH 6/9] Rename keys for OnchainTxHandler::claimable_outpoints map --- lightning/src/chain/onchaintx.rs | 44 ++++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index 6cb59b590aa..faf3fe12f60 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -251,12 +251,12 @@ pub struct OnchainTxHandler { #[cfg(anchors)] pending_claim_events: HashMap, - // Used to link outpoints claimed in a connected block to a pending claim request.
- // Key is outpoint than monitor parsing has detected we have keys/scripts to claim - // Value is (pending claim request identifier, confirmation_block), identifier - // is txid of the initial claiming transaction and is immutable until outpoint is - // post-anti-reorg-delay solved, confirmaiton_block is used to erase entry if - // block with output gets disconnected. + // Used to link outpoints claimed in a connected block to a pending claim request. The keys + // represent the outpoints that our `ChannelMonitor` has detected we have keys/scripts to + // claim. The values track the pending claim request identifier and the initial confirmation + // block height, and are immutable until the outpoint has enough confirmations to meet our + // [`ANTI_REORG_DELAY`]. The initial confirmation block height is used to remove the entry if + // the block gets disconnected. #[cfg(test)] // Used in functional_test to verify sanitization pub claimable_outpoints: HashMap, #[cfg(not(test))] @@ -494,12 +494,12 @@ impl OnchainTxHandler // transaction is reorged out. let mut all_inputs_have_confirmed_spend = true; for outpoint in request_outpoints.iter() { - if let Some(first_claim_txid_height) = self.claimable_outpoints.get(*outpoint) { + if let Some((request_package_id, _)) = self.claimable_outpoints.get(*outpoint) { // We check for outpoint spends within claims individually rather than as a set // since requests can have outpoints split off. if !self.onchain_events_awaiting_threshold_conf.iter() .any(|event_entry| if let OnchainEvent::Claim { package_id } = event_entry.event { - first_claim_txid_height.0 == package_id + *request_package_id == package_id } else { // The onchain event is not a claim, keep seeking until we find one. false @@ -744,9 +744,9 @@ impl OnchainTxHandler // Scan all input to verify is one of the outpoint spent is of interest for us let mut claimed_outputs_material = Vec::new(); for inp in &tx.input { - if let Some(first_claim_txid_height) = self.claimable_outpoints.get(&inp.previous_output) { + if let Some((package_id, _)) = self.claimable_outpoints.get(&inp.previous_output) { // If outpoint has claim request pending on it... - if let Some(request) = self.pending_claim_requests.get_mut(&first_claim_txid_height.0) { + if let Some(request) = self.pending_claim_requests.get_mut(package_id) { //... we need to verify equality between transaction outpoints and claim request // outpoints to know if transaction is the original claim or a bumped one issued // by us. 
@@ -766,7 +766,7 @@ impl OnchainTxHandler txid: tx.txid(), height: conf_height, block_hash: Some(conf_hash), - event: OnchainEvent::Claim { package_id: first_claim_txid_height.0 } + event: OnchainEvent::Claim { package_id: *package_id } }; if !self.onchain_events_awaiting_threshold_conf.contains(&entry) { self.onchain_events_awaiting_threshold_conf.push(entry); @@ -793,7 +793,7 @@ impl OnchainTxHandler } //TODO: recompute soonest_timelock to avoid wasting a bit on fees if at_least_one_drop { - bump_candidates.insert(first_claim_txid_height.0.clone(), request.clone()); + bump_candidates.insert(*package_id, request.clone()); } } break; //No need to iterate further, either tx is our or their @@ -846,17 +846,17 @@ impl OnchainTxHandler } // Check if any pending claim request must be rescheduled - for (first_claim_txid, ref request) in self.pending_claim_requests.iter() { + for (package_id, request) in self.pending_claim_requests.iter() { if let Some(h) = request.timer() { if cur_height >= h { - bump_candidates.insert(*first_claim_txid, (*request).clone()); + bump_candidates.insert(*package_id, request.clone()); } } } // Build, bump and rebroadcast tx accordingly log_trace!(logger, "Bumping {} candidates", bump_candidates.len()); - for (first_claim_txid, request) in bump_candidates.iter() { + for (package_id, request) in bump_candidates.iter() { if let Some((new_timer, new_feerate, bump_claim)) = self.generate_claim(cur_height, &request, &*fee_estimator, &*logger) { match bump_claim { OnchainClaim::Tx(bump_tx) => { @@ -866,10 +866,10 @@ impl OnchainTxHandler #[cfg(anchors)] OnchainClaim::Event(claim_event) => { log_info!(logger, "Yielding RBF-bumped onchain event to spend inputs {:?}", request.outpoints()); - self.pending_claim_events.insert(*first_claim_txid, claim_event); + self.pending_claim_events.insert(*package_id, claim_event); }, } - if let Some(request) = self.pending_claim_requests.get_mut(first_claim_txid) { + if let Some(request) = self.pending_claim_requests.get_mut(package_id) { request.set_timer(new_timer); request.set_feerate(new_feerate); } @@ -915,12 +915,12 @@ impl OnchainTxHandler //- resurect outpoint back in its claimable set and regenerate tx match entry.event { OnchainEvent::ContentiousOutpoint { package } => { - if let Some(ancestor_claimable_txid) = self.claimable_outpoints.get(package.outpoints()[0]) { - if let Some(request) = self.pending_claim_requests.get_mut(&ancestor_claimable_txid.0) { + if let Some(pending_claim) = self.claimable_outpoints.get(package.outpoints()[0]) { + if let Some(request) = self.pending_claim_requests.get_mut(&pending_claim.0) { request.merge_package(package); // Using a HashMap guarantee us than if we have multiple outpoints getting // resurrected only one bump claim tx is going to be broadcast - bump_candidates.insert(ancestor_claimable_txid.clone(), request.clone()); + bump_candidates.insert(pending_claim.clone(), request.clone()); } } }, @@ -930,7 +930,7 @@ impl OnchainTxHandler self.onchain_events_awaiting_threshold_conf.push(entry); } } - for (_first_claim_txid_height, request) in bump_candidates.iter_mut() { + for ((_package_id, _), request) in bump_candidates.iter_mut() { if let Some((new_timer, new_feerate, bump_claim)) = self.generate_claim(height, &request, fee_estimator, &&*logger) { request.set_timer(new_timer); request.set_feerate(new_feerate); @@ -942,7 +942,7 @@ impl OnchainTxHandler #[cfg(anchors)] OnchainClaim::Event(claim_event) => { log_info!(logger, "Yielding onchain event after reorg to spend inputs {:?}", 
request.outpoints()); - self.pending_claim_events.insert(_first_claim_txid_height.0, claim_event); + self.pending_claim_events.insert(_package_id, claim_event); }, } } From 4be56b93b043189f580bb62026b6d8b0da10fd4e Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Tue, 14 Feb 2023 15:49:37 -0800 Subject: [PATCH 7/9] Maintain order of yielded claim events Since the claim events are stored internally within a HashMap, they will be yielded in a random order once dispatched. Claim events may be invalidated if a conflicting claim has confirmed on-chain and we need to generate a new claim event; the randomized order could result in the new claim event being handled prior to the previous. To maintain the order in which the claim events are generated, we track them in a Vec instead and ensure only one instance of a PackageId ever exists within it. This would have certain performance implications, but since we're bounded by the total number of HTLCs in a commitment anyway, we're comfortable with taking the cost. --- lightning/src/chain/onchaintx.rs | 69 ++++++++++++++++++++++++++------ 1 file changed, 56 insertions(+), 13 deletions(-) diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index faf3fe12f60..2c570f580bd 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -248,8 +248,19 @@ pub struct OnchainTxHandler { pub(crate) pending_claim_requests: HashMap, #[cfg(not(test))] pending_claim_requests: HashMap, + + // Used to track external events that need to be forwarded to the `ChainMonitor`. This `Vec` + // essentially acts as an insertion-ordered `HashMap` – there should only ever be one occurrence + // of a `PackageID`, which tracks its latest `ClaimEvent`, i.e., if a pending claim exists, and + // a new block has been connected, resulting in a new claim, the previous will be replaced with + // the new. + // + // These external events may be generated in the following cases: + // - A channel has been force closed by broadcasting the holder's latest commitment transaction + // - A block being connected/disconnected + // - Learning the preimage for an HTLC we can claim onchain #[cfg(anchors)] - pending_claim_events: HashMap, + pending_claim_events: Vec<(PackageID, ClaimEvent)>, // Used to link outpoints claimed in a connected block to a pending claim request.
The keys // represent the outpoints that our `ChannelMonitor` has detected we have keys/scripts to @@ -426,7 +437,7 @@ impl<'a, 'b, ES: EntropySource, SP: SignerProvider> ReadableArgs<(&'a ES, &'b SP pending_claim_requests, onchain_events_awaiting_threshold_conf, #[cfg(anchors)] - pending_claim_events: HashMap::new(), + pending_claim_events: Vec::new(), secp_ctx, }) } @@ -447,8 +458,7 @@ impl OnchainTxHandler locktimed_packages: BTreeMap::new(), onchain_events_awaiting_threshold_conf: Vec::new(), #[cfg(anchors)] - pending_claim_events: HashMap::new(), - + pending_claim_events: Vec::new(), secp_ctx, } } @@ -463,9 +473,9 @@ impl OnchainTxHandler #[cfg(anchors)] pub(crate) fn get_and_clear_pending_claim_events(&mut self) -> Vec { - let mut ret = HashMap::new(); - swap(&mut ret, &mut self.pending_claim_events); - ret.into_iter().map(|(_, event)| event).collect::>() + let mut events = Vec::new(); + swap(&mut events, &mut self.pending_claim_events); + events.into_iter().map(|(_, event)| event).collect() } /// Lightning security model (i.e being able to redeem/timeout HTLC or penalize counterparty @@ -709,7 +719,8 @@ impl OnchainTxHandler package_id }, }; - self.pending_claim_events.insert(package_id, claim_event); + debug_assert_eq!(self.pending_claim_events.iter().filter(|entry| entry.0 == package_id).count(), 0); + self.pending_claim_events.push((package_id, claim_event)); package_id }, }; @@ -794,6 +805,20 @@ impl OnchainTxHandler //TODO: recompute soonest_timelock to avoid wasting a bit on fees if at_least_one_drop { bump_candidates.insert(*package_id, request.clone()); + // If we have any pending claim events for the request being updated + // that have yet to be consumed, we'll remove them since they will + // end up producing an invalid transaction by double spending + // input(s) that already have a confirmed spend. If such spend is + // reorged out of the chain, then we'll attempt to re-spend the + // inputs once we see it. + #[cfg(anchors)] { + #[cfg(debug_assertions)] { + let existing = self.pending_claim_events.iter() + .filter(|entry| entry.0 == *package_id).count(); + assert!(existing == 0 || existing == 1); + } + self.pending_claim_events.retain(|entry| entry.0 != *package_id); + } } } break; //No need to iterate further, either tx is our or their @@ -829,8 +854,14 @@ impl OnchainTxHandler log_debug!(logger, "Removing claim tracking for {} due to maturation of claim package {}.", outpoint, log_bytes!(package_id)); self.claimable_outpoints.remove(outpoint); - #[cfg(anchors)] - self.pending_claim_events.remove(&package_id); + } + #[cfg(anchors)] { + #[cfg(debug_assertions)] { + let num_existing = self.pending_claim_events.iter() + .filter(|entry| entry.0 == package_id).count(); + assert!(num_existing == 0 || num_existing == 1); + } + self.pending_claim_events.retain(|(id, _)| *id != package_id); } } }, @@ -866,7 +897,13 @@ impl OnchainTxHandler #[cfg(anchors)] OnchainClaim::Event(claim_event) => { log_info!(logger, "Yielding RBF-bumped onchain event to spend inputs {:?}", request.outpoints()); - self.pending_claim_events.insert(*package_id, claim_event); + #[cfg(debug_assertions)] { + let num_existing = self.pending_claim_events.iter(). 
+ filter(|entry| entry.0 == *package_id).count(); + assert!(num_existing == 0 || num_existing == 1); + } + self.pending_claim_events.retain(|event| event.0 != *package_id); + self.pending_claim_events.push((*package_id, claim_event)); }, } if let Some(request) = self.pending_claim_requests.get_mut(package_id) { @@ -930,7 +967,7 @@ impl OnchainTxHandler self.onchain_events_awaiting_threshold_conf.push(entry); } } - for ((_package_id, _), request) in bump_candidates.iter_mut() { + for ((_package_id, _), ref mut request) in bump_candidates.iter_mut() { if let Some((new_timer, new_feerate, bump_claim)) = self.generate_claim(height, &request, fee_estimator, &&*logger) { request.set_timer(new_timer); request.set_feerate(new_feerate); @@ -942,7 +979,13 @@ impl OnchainTxHandler #[cfg(anchors)] OnchainClaim::Event(claim_event) => { log_info!(logger, "Yielding onchain event after reorg to spend inputs {:?}", request.outpoints()); - self.pending_claim_events.insert(_package_id, claim_event); + #[cfg(debug_assertions)] { + let num_existing = self.pending_claim_events.iter() + .filter(|entry| entry.0 == *_package_id).count(); + assert!(num_existing == 0 || num_existing == 1); + } + self.pending_claim_events.retain(|event| event.0 != *_package_id); + self.pending_claim_events.push((*_package_id, claim_event)); }, } } From 2cc48c5a3c8c3332df7e53893da0318c7d799c39 Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Tue, 14 Feb 2023 16:10:52 -0800 Subject: [PATCH 8/9] Add test for aggregated revoked HTLC claim on anchors channel --- lightning/src/ln/functional_test_utils.rs | 23 +- lightning/src/ln/monitor_tests.rs | 373 +++++++++++++++++++++- 2 files changed, 382 insertions(+), 14 deletions(-) diff --git a/lightning/src/ln/functional_test_utils.rs b/lightning/src/ln/functional_test_utils.rs index f48c9d099e7..51ad65949ef 100644 --- a/lightning/src/ln/functional_test_utils.rs +++ b/lightning/src/ln/functional_test_utils.rs @@ -1239,24 +1239,23 @@ macro_rules! check_warn_msg { /// Check that a channel's closing channel update has been broadcasted, and optionally /// check whether an error message event has occurred. 
-pub fn check_closed_broadcast(node: &Node, with_error_msg: bool) -> Option { +pub fn check_closed_broadcast(node: &Node, num_channels: usize, with_error_msg: bool) -> Vec { let msg_events = node.node.get_and_clear_pending_msg_events(); - assert_eq!(msg_events.len(), if with_error_msg { 2 } else { 1 }); - match msg_events[0] { - MessageSendEvent::BroadcastChannelUpdate { ref msg } => { - assert_eq!(msg.contents.flags & 2, 2); - }, - _ => panic!("Unexpected event"), - } - if with_error_msg { - match msg_events[1] { + assert_eq!(msg_events.len(), if with_error_msg { num_channels * 2 } else { num_channels }); + msg_events.into_iter().filter_map(|msg_event| { + match msg_event { + MessageSendEvent::BroadcastChannelUpdate { ref msg } => { + assert_eq!(msg.contents.flags & 2, 2); + None + }, MessageSendEvent::HandleError { action: msgs::ErrorAction::SendErrorMessage { ref msg }, node_id: _ } => { + assert!(with_error_msg); // TODO: Check node_id Some(msg.clone()) }, _ => panic!("Unexpected event"), } - } else { None } + }).collect() } /// Check that a channel's closing channel update has been broadcasted, and optionally @@ -1266,7 +1265,7 @@ pub fn check_closed_broadcast(node: &Node, with_error_msg: bool) -> Option { - $crate::ln::functional_test_utils::check_closed_broadcast(&$node, $with_error_msg) + $crate::ln::functional_test_utils::check_closed_broadcast(&$node, 1, $with_error_msg).pop() } } diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index af33ca7b737..3bd50293ff2 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -19,20 +19,34 @@ use crate::chain::chaininterface::LowerBoundedFeeEstimator; use crate::ln::channel; #[cfg(anchors)] use crate::ln::chan_utils; +#[cfg(anchors)] +use crate::ln::channelmanager::ChannelManager; use crate::ln::channelmanager::{BREAKDOWN_TIMEOUT, PaymentId}; use crate::ln::msgs::ChannelMessageHandler; #[cfg(anchors)] use crate::util::config::UserConfig; #[cfg(anchors)] +use crate::util::crypto::sign; +#[cfg(anchors)] use crate::util::events::BumpTransactionEvent; use crate::util::events::{Event, MessageSendEvent, MessageSendEventsProvider, ClosureReason, HTLCDestination}; +#[cfg(anchors)] +use crate::util::ser::Writeable; +#[cfg(anchors)] +use crate::util::test_utils; +#[cfg(anchors)] +use bitcoin::blockdata::transaction::EcdsaSighashType; use bitcoin::blockdata::script::Builder; use bitcoin::blockdata::opcodes; use bitcoin::secp256k1::Secp256k1; #[cfg(anchors)] -use bitcoin::{Amount, Script, TxIn, TxOut, PackedLockTime}; +use bitcoin::secp256k1::SecretKey; +#[cfg(anchors)] +use bitcoin::{Amount, PublicKey, Script, TxIn, TxOut, PackedLockTime, Witness}; use bitcoin::Transaction; +#[cfg(anchors)] +use bitcoin::util::sighash::SighashCache; use crate::prelude::*; @@ -1748,7 +1762,7 @@ fn test_yield_anchors_events() { let mut holder_events = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events(); // Certain block `ConnectStyle`s cause an extra `ChannelClose` event to be emitted since the - // best block is being updated prior to the confirmed transactions. + // best block is updated before the confirmed transactions are notified. match *nodes[0].connect_style.borrow() { ConnectStyle::BestBlockFirst|ConnectStyle::BestBlockFirstReorgsOnlyTip|ConnectStyle::BestBlockFirstSkippingBlocks => { assert_eq!(holder_events.len(), 3); @@ -1815,3 +1829,358 @@ fn test_yield_anchors_events() { // Clear the remaining events as they're not relevant to what we're testing. 
nodes[0].node.get_and_clear_pending_events(); } + +#[cfg(anchors)] +#[test] +fn test_anchors_aggregated_revoked_htlc_tx() { + // Test that `ChannelMonitor`s can properly detect and claim funds from a counterparty claiming + // multiple HTLCs from multiple channels in a single transaction via the success path from a + // revoked commitment. + let secp = Secp256k1::new(); + let mut chanmon_cfgs = create_chanmon_cfgs(2); + // Required to sign a revoked commitment transaction + chanmon_cfgs[1].keys_manager.disable_revocation_policy_check = true; + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let mut anchors_config = UserConfig::default(); + anchors_config.channel_handshake_config.announced_channel = true; + anchors_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; + let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(anchors_config), Some(anchors_config)]); + + let bob_persister: test_utils::TestPersister; + let bob_chain_monitor: test_utils::TestChainMonitor; + let bob_deserialized: ChannelManager< + &test_utils::TestChainMonitor, &test_utils::TestBroadcaster, &test_utils::TestKeysInterface, + &test_utils::TestKeysInterface, &test_utils::TestKeysInterface, &test_utils::TestFeeEstimator, + &test_utils::TestRouter, &test_utils::TestLogger, + >; + + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let chan_a = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 20_000_000); + let chan_b = create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 20_000_000); + + // Route two payments for each channel from Alice to Bob to lock in the HTLCs. + let payment_a = route_payment(&nodes[0], &[&nodes[1]], 50_000_000); + let payment_b = route_payment(&nodes[0], &[&nodes[1]], 50_000_000); + let payment_c = route_payment(&nodes[0], &[&nodes[1]], 50_000_000); + let payment_d = route_payment(&nodes[0], &[&nodes[1]], 50_000_000); + + // Serialize Bob with the HTLCs locked in. We'll restart Bob later on with the state at this + // point such that he broadcasts a revoked commitment transaction. + let bob_serialized = nodes[1].node.encode(); + let bob_serialized_monitor_a = get_monitor!(nodes[1], chan_a.2).encode(); + let bob_serialized_monitor_b = get_monitor!(nodes[1], chan_b.2).encode(); + + // Bob claims all the HTLCs... + claim_payment(&nodes[0], &[&nodes[1]], payment_a.0); + claim_payment(&nodes[0], &[&nodes[1]], payment_b.0); + claim_payment(&nodes[0], &[&nodes[1]], payment_c.0); + claim_payment(&nodes[0], &[&nodes[1]], payment_d.0); + + // ...and sends one back through each channel such that he has a motive to broadcast his + // revoked state. + send_payment(&nodes[1], &[&nodes[0]], 30_000_000); + send_payment(&nodes[1], &[&nodes[0]], 30_000_000); + + // Restart Bob with the revoked state and provide the HTLC preimages he claimed. + reload_node!( + nodes[1], anchors_config, bob_serialized, &[&bob_serialized_monitor_a, &bob_serialized_monitor_b], + bob_persister, bob_chain_monitor, bob_deserialized + ); + for chan_id in [chan_a.2, chan_b.2].iter() { + let monitor = get_monitor!(nodes[1], chan_id); + for payment in [payment_a, payment_b, payment_c, payment_d].iter() { + monitor.provide_payment_preimage( + &payment.1, &payment.0, &node_cfgs[1].tx_broadcaster, + &LowerBoundedFeeEstimator::new(node_cfgs[1].fee_estimator), &nodes[1].logger + ); + } + } + + // Bob force closes by broadcasting his revoked state for each channel. 
+ nodes[1].node.force_close_broadcasting_latest_txn(&chan_a.2, &nodes[0].node.get_our_node_id()).unwrap(); + check_added_monitors(&nodes[1], 1); + check_closed_broadcast(&nodes[1], 1, true); + check_closed_event!(&nodes[1], 1, ClosureReason::HolderForceClosed); + let revoked_commitment_a = { + let mut txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); + assert_eq!(txn.len(), 1); + let revoked_commitment = txn.pop().unwrap(); + assert_eq!(revoked_commitment.output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs + check_spends!(revoked_commitment, chan_a.3); + revoked_commitment + }; + nodes[1].node.force_close_broadcasting_latest_txn(&chan_b.2, &nodes[0].node.get_our_node_id()).unwrap(); + check_added_monitors(&nodes[1], 1); + check_closed_broadcast(&nodes[1], 1, true); + check_closed_event!(&nodes[1], 1, ClosureReason::HolderForceClosed); + let revoked_commitment_b = { + let mut txn = nodes[1].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); + assert_eq!(txn.len(), 1); + let revoked_commitment = txn.pop().unwrap(); + assert_eq!(revoked_commitment.output.len(), 6); // 2 HTLC outputs + 1 to_self output + 1 to_remote output + 2 anchor outputs + check_spends!(revoked_commitment, chan_b.3); + revoked_commitment + }; + + // Bob should now receive two events to bump his revoked commitment transaction fees. + assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); + let events = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events(); + assert_eq!(events.len(), 2); + let anchor_tx = { + let secret_key = SecretKey::from_slice(&[1; 32]).unwrap(); + let public_key = PublicKey::new(secret_key.public_key(&secp)); + let fee_utxo_script = Script::new_v0_p2wpkh(&public_key.wpubkey_hash().unwrap()); + let coinbase_tx = Transaction { + version: 2, + lock_time: PackedLockTime::ZERO, + input: vec![TxIn { ..Default::default() }], + output: vec![TxOut { // UTXO to attach fees to `anchor_tx` + value: Amount::ONE_BTC.to_sat(), + script_pubkey: fee_utxo_script.clone(), + }], + }; + let mut anchor_tx = Transaction { + version: 2, + lock_time: PackedLockTime::ZERO, + input: vec![ + TxIn { // Fee input + previous_output: bitcoin::OutPoint { txid: coinbase_tx.txid(), vout: 0 }, + ..Default::default() + }, + ], + output: vec![TxOut { // Fee input change + value: coinbase_tx.output[0].value / 2 , + script_pubkey: Script::new_op_return(&[]), + }], + }; + let mut signers = Vec::with_capacity(2); + for event in events { + match event { + Event::BumpTransaction(BumpTransactionEvent::ChannelClose { anchor_descriptor, .. 
}) => { + anchor_tx.input.push(TxIn { + previous_output: anchor_descriptor.outpoint, + ..Default::default() + }); + let signer = nodes[1].keys_manager.derive_channel_keys( + anchor_descriptor.channel_value_satoshis, &anchor_descriptor.channel_keys_id, + ); + signers.push(signer); + }, + _ => panic!("Unexpected event"), + } + } + for (i, signer) in signers.into_iter().enumerate() { + let anchor_idx = i + 1; + let funding_sig = signer.sign_holder_anchor_input(&mut anchor_tx, anchor_idx, &secp).unwrap(); + anchor_tx.input[anchor_idx].witness = chan_utils::build_anchor_input_witness( + &signer.pubkeys().funding_pubkey, &funding_sig + ); + } + let fee_utxo_sig = { + let witness_script = Script::new_p2pkh(&public_key.pubkey_hash()); + let sighash = hash_to_message!(&SighashCache::new(&anchor_tx).segwit_signature_hash( + 0, &witness_script, coinbase_tx.output[0].value, EcdsaSighashType::All + ).unwrap()[..]); + let sig = sign(&secp, &sighash, &secret_key); + let mut sig = sig.serialize_der().to_vec(); + sig.push(EcdsaSighashType::All as u8); + sig + }; + anchor_tx.input[0].witness = Witness::from_vec(vec![fee_utxo_sig, public_key.to_bytes()]); + check_spends!(anchor_tx, coinbase_tx, revoked_commitment_a, revoked_commitment_b); + anchor_tx + }; + + for node in &nodes { + mine_transactions(node, &[&revoked_commitment_a, &revoked_commitment_b, &anchor_tx]); + } + check_added_monitors!(&nodes[0], 2); + check_closed_broadcast(&nodes[0], 2, true); + check_closed_event!(&nodes[0], 2, ClosureReason::CommitmentTxConfirmed); + + // Alice should detect the confirmed revoked commitments, and attempt to claim all of the + // revoked outputs. + { + let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); + assert_eq!(txn.len(), 2); + + let (revoked_claim_a, revoked_claim_b) = if txn[0].input[0].previous_output.txid == revoked_commitment_a.txid() { + (&txn[0], &txn[1]) + } else { + (&txn[1], &txn[0]) + }; + + // TODO: to_self claim must be separate from HTLC claims + assert_eq!(revoked_claim_a.input.len(), 3); // Spends both HTLC outputs and to_self output + assert_eq!(revoked_claim_a.output.len(), 1); + check_spends!(revoked_claim_a, revoked_commitment_a); + assert_eq!(revoked_claim_b.input.len(), 3); // Spends both HTLC outputs and to_self output + assert_eq!(revoked_claim_b.output.len(), 1); + check_spends!(revoked_claim_b, revoked_commitment_b); + } + + // Since Bob was able to confirm his revoked commitment, he'll now try to claim the HTLCs + // through the success path. + assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); + let mut events = nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events(); + // Certain block `ConnectStyle`s cause an extra `ChannelClose` event to be emitted since the + // best block is updated before the confirmed transactions are notified. + match *nodes[1].connect_style.borrow() { + ConnectStyle::BestBlockFirst|ConnectStyle::BestBlockFirstReorgsOnlyTip|ConnectStyle::BestBlockFirstSkippingBlocks => { + assert_eq!(events.len(), 4); + if let Event::BumpTransaction(BumpTransactionEvent::ChannelClose { .. }) = events.remove(0) {} + else { panic!("unexpected event"); } + if let Event::BumpTransaction(BumpTransactionEvent::ChannelClose { .. 
}) = events.remove(1) {} + else { panic!("unexpected event"); } + + }, + _ => assert_eq!(events.len(), 2), + }; + let htlc_tx = { + let secret_key = SecretKey::from_slice(&[1; 32]).unwrap(); + let public_key = PublicKey::new(secret_key.public_key(&secp)); + let fee_utxo_script = Script::new_v0_p2wpkh(&public_key.wpubkey_hash().unwrap()); + let coinbase_tx = Transaction { + version: 2, + lock_time: PackedLockTime::ZERO, + input: vec![TxIn { ..Default::default() }], + output: vec![TxOut { // UTXO to attach fees to `htlc_tx` + value: Amount::ONE_BTC.to_sat(), + script_pubkey: fee_utxo_script.clone(), + }], + }; + let mut htlc_tx = Transaction { + version: 2, + lock_time: PackedLockTime::ZERO, + input: vec![TxIn { // Fee input + previous_output: bitcoin::OutPoint { txid: coinbase_tx.txid(), vout: 0 }, + ..Default::default() + }], + output: vec![TxOut { // Fee input change + value: coinbase_tx.output[0].value / 2 , + script_pubkey: Script::new_op_return(&[]), + }], + }; + let mut descriptors = Vec::with_capacity(4); + for event in events { + if let Event::BumpTransaction(BumpTransactionEvent::HTLCResolution { mut htlc_descriptors, .. }) = event { + assert_eq!(htlc_descriptors.len(), 2); + for htlc_descriptor in &htlc_descriptors { + assert!(!htlc_descriptor.htlc.offered); + let signer = nodes[1].keys_manager.derive_channel_keys( + htlc_descriptor.channel_value_satoshis, &htlc_descriptor.channel_keys_id + ); + let per_commitment_point = signer.get_per_commitment_point(htlc_descriptor.per_commitment_number, &secp); + htlc_tx.input.push(htlc_descriptor.unsigned_tx_input()); + htlc_tx.output.push(htlc_descriptor.tx_output(&per_commitment_point, &secp)); + } + descriptors.append(&mut htlc_descriptors); + } else { + panic!("Unexpected event"); + } + } + for (idx, htlc_descriptor) in descriptors.into_iter().enumerate() { + let htlc_input_idx = idx + 1; + let signer = nodes[1].keys_manager.derive_channel_keys( + htlc_descriptor.channel_value_satoshis, &htlc_descriptor.channel_keys_id + ); + let our_sig = signer.sign_holder_htlc_transaction(&htlc_tx, htlc_input_idx, &htlc_descriptor, &secp).unwrap(); + let per_commitment_point = signer.get_per_commitment_point(htlc_descriptor.per_commitment_number, &secp); + let witness_script = htlc_descriptor.witness_script(&per_commitment_point, &secp); + htlc_tx.input[htlc_input_idx].witness = htlc_descriptor.tx_input_witness(&our_sig, &witness_script); + } + let fee_utxo_sig = { + let witness_script = Script::new_p2pkh(&public_key.pubkey_hash()); + let sighash = hash_to_message!(&SighashCache::new(&htlc_tx).segwit_signature_hash( + 0, &witness_script, coinbase_tx.output[0].value, EcdsaSighashType::All + ).unwrap()[..]); + let sig = sign(&secp, &sighash, &secret_key); + let mut sig = sig.serialize_der().to_vec(); + sig.push(EcdsaSighashType::All as u8); + sig + }; + htlc_tx.input[0].witness = Witness::from_vec(vec![fee_utxo_sig, public_key.to_bytes()]); + check_spends!(htlc_tx, coinbase_tx, revoked_commitment_a, revoked_commitment_b); + htlc_tx + }; + + for node in &nodes { + mine_transaction(node, &htlc_tx); + } + + // Alice should see that Bob is trying to claim to HTLCs, so she should now try to claim them at + // the second level instead. 
+ let revoked_claims = { + let txn = nodes[0].tx_broadcaster.txn_broadcasted.lock().unwrap().split_off(0); + assert_eq!(txn.len(), 4); + + let revoked_to_self_claim_a = txn.iter().find(|tx| + tx.input.len() == 1 && + tx.output.len() == 1 && + tx.input[0].previous_output.txid == revoked_commitment_a.txid() + ).unwrap(); + check_spends!(revoked_to_self_claim_a, revoked_commitment_a); + + let revoked_to_self_claim_b = txn.iter().find(|tx| + tx.input.len() == 1 && + tx.output.len() == 1 && + tx.input[0].previous_output.txid == revoked_commitment_b.txid() + ).unwrap(); + check_spends!(revoked_to_self_claim_b, revoked_commitment_b); + + let revoked_htlc_claims = txn.iter().filter(|tx| + tx.input.len() == 2 && + tx.output.len() == 1 && + tx.input[0].previous_output.txid == htlc_tx.txid() + ).collect::>(); + assert_eq!(revoked_htlc_claims.len(), 2); + for revoked_htlc_claim in revoked_htlc_claims { + check_spends!(revoked_htlc_claim, htlc_tx); + } + + txn + }; + for node in &nodes { + mine_transactions(node, &revoked_claims.iter().collect::>()); + } + + + // Connect one block to make sure the HTLC events are not yielded while ANTI_REORG_DELAY has not + // been reached. + connect_blocks(&nodes[0], 1); + connect_blocks(&nodes[1], 1); + + assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); + assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); + + // Connect the remaining blocks to reach ANTI_REORG_DELAY. + connect_blocks(&nodes[0], ANTI_REORG_DELAY - 2); + connect_blocks(&nodes[1], ANTI_REORG_DELAY - 2); + + assert!(nodes[1].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); + let spendable_output_events = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events(); + assert_eq!(spendable_output_events.len(), 4); + for (idx, event) in spendable_output_events.iter().enumerate() { + if let Event::SpendableOutputs { outputs } = event { + assert_eq!(outputs.len(), 1); + let spend_tx = nodes[0].keys_manager.backing.spend_spendable_outputs( + &[&outputs[0]], Vec::new(), Script::new_op_return(&[]), 253, &Secp256k1::new(), + ).unwrap(); + check_spends!(spend_tx, revoked_claims[idx]); + } else { + panic!("unexpected event"); + } + } + + assert!(nodes[0].node.list_channels().is_empty()); + assert!(nodes[1].node.list_channels().is_empty()); + assert!(nodes[0].chain_monitor.chain_monitor.get_claimable_balances(&[]).is_empty()); + // TODO: From Bob's PoV, he still thinks he can claim the outputs from his revoked commitment. + // This needs to be fixed before we enable pruning `ChannelMonitor`s once they don't have any + // balances to claim. + // + // The 6 claimable balances correspond to his `to_self` outputs and the 2 HTLC outputs in each + // revoked commitment which Bob has the preimage for. 
+ assert_eq!(nodes[1].chain_monitor.chain_monitor.get_claimable_balances(&[]).len(), 6); +} From 881656ba9e981b5994b9ac7028571345bc0f466b Mon Sep 17 00:00:00 2001 From: Wilmer Paulino Date: Tue, 14 Feb 2023 16:32:45 -0800 Subject: [PATCH 9/9] Test anchors build in CI --- .github/workflows/build.yml | 1 + ci/ci-tests.sh | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 881e4f24749..72220d831d3 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -179,6 +179,7 @@ jobs: cargo check --no-default-features --features=no-std --release cargo check --no-default-features --features=futures --release cargo doc --release + RUSTDOCFLAGS="--cfg=anchors" cargo doc --release fuzz: runs-on: ubuntu-latest diff --git a/ci/ci-tests.sh b/ci/ci-tests.sh index 7b0beb90fa9..5d229a03f19 100755 --- a/ci/ci-tests.sh +++ b/ci/ci-tests.sh @@ -89,3 +89,8 @@ if [ "$RUSTC_MINOR_VERSION" -gt 55 ]; then cargo test --verbose --color always popd fi + +echo -e "\n\nTest anchors builds" +pushd lightning +RUSTFLAGS="$RUSTFLAGS --cfg=anchors" cargo test --verbose --color always -p lightning +popd
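
A note on the ordering fix in PATCH 7/9: `pending_claim_events` becomes a `Vec<(PackageID, ClaimEvent)>` used as an insertion-ordered map with at most one entry per `PackageID`; regenerating a claim removes the stale entry and appends the new one, and `get_and_clear_pending_claim_events` drains the vector in that order. The sketch below is illustrative only, not LDK code: the `PackageId` and `ClaimEvent` types and the `PendingClaimEvents` wrapper are simplified stand-ins meant to show the replace-then-push pattern and the ordered drain under those assumptions.

// Minimal sketch of an insertion-ordered "map" keyed by package ID.
// `PackageId` and `ClaimEvent` are simplified stand-ins, not the real LDK types.
type PackageId = [u8; 32];

#[derive(Debug, Clone, PartialEq, Eq)]
struct ClaimEvent(String);

#[derive(Default)]
struct PendingClaimEvents(Vec<(PackageId, ClaimEvent)>);

impl PendingClaimEvents {
    // Replace any existing event for `id`, then append, so each `PackageId` appears
    // at most once and events are yielded in the order they were (re)generated.
    fn upsert(&mut self, id: PackageId, event: ClaimEvent) {
        debug_assert!(self.0.iter().filter(|entry| entry.0 == id).count() <= 1);
        self.0.retain(|entry| entry.0 != id);
        self.0.push((id, event));
    }

    // Drop a tracked event whose claim was invalidated, e.g. by a conflicting confirmed spend.
    fn remove(&mut self, id: PackageId) {
        self.0.retain(|entry| entry.0 != id);
    }

    // Analogous to `get_and_clear_pending_claim_events`: drain in insertion order.
    fn get_and_clear(&mut self) -> Vec<ClaimEvent> {
        std::mem::take(&mut self.0).into_iter().map(|(_, event)| event).collect()
    }
}

fn main() {
    let mut events = PendingClaimEvents::default();
    events.upsert([1; 32], ClaimEvent("claim A".into()));
    events.upsert([2; 32], ClaimEvent("claim B".into()));
    // A new block invalidates claim A and a bumped claim is generated: the stale
    // entry is replaced rather than duplicated, and the bumped event goes last.
    events.upsert([1; 32], ClaimEvent("claim A (bumped)".into()));
    assert_eq!(
        events.get_and_clear(),
        vec![ClaimEvent("claim B".into()), ClaimEvent("claim A (bumped)".into())]
    );
    events.remove([3; 32]); // removing an unknown id is a no-op
}

Since the vector is bounded by the number of HTLCs in a commitment transaction, the linear `retain` scans stay cheap, which is the trade-off the commit message refers to.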