Commit 131560e

Merge pull request #2387 from vladimirfomene/add_extra_fields_to_ChannelClosed_event
Add counterparty_node_id & channel_capacity to ChannelClosed event
2 parents 9e4a35a + 7cfafc9 commit 131560e

14 files changed: +286 -218 lines
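
The two new fields surface the closing channel's counterparty and its total capacity directly on the event. A minimal sketch of how an application's event handler might read them; the handler function and the log output are illustrative, not part of this commit:

    use lightning::events::Event;

    // Hypothetical handler; only the ChannelClosed arm is relevant here.
    fn handle_event(event: Event) {
        match event {
            Event::ChannelClosed { channel_id, reason, counterparty_node_id, channel_capacity_sats, .. } => {
                // Both fields are Options: events serialized by LDK versions
                // prior to 0.0.117 deserialize with these set to None.
                if let Some(peer) = counterparty_node_id {
                    println!("channel {:?} with peer {} closed: {:?}", channel_id, peer, reason);
                }
                if let Some(capacity) = channel_capacity_sats {
                    println!("closed channel capacity: {} sats", capacity);
                }
            },
            _ => {},
        }
    }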

lightning-persister/src/lib.rs

+4 -4

@@ -237,7 +237,7 @@ mod tests {
 // Force close because cooperative close doesn't result in any persisted
 // updates.
 nodes[0].node.force_close_broadcasting_latest_txn(&nodes[0].node.list_channels()[0].channel_id, &nodes[1].node.get_our_node_id()).unwrap();
-check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
+check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
 check_closed_broadcast!(nodes[0], true);
 check_added_monitors!(nodes[0], 1);

@@ -246,7 +246,7 @@ mod tests {

 connect_block(&nodes[1], &create_dummy_block(nodes[0].best_block_hash(), 42, vec![node_txn[0].clone(), node_txn[0].clone()]));
 check_closed_broadcast!(nodes[1], true);
-check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[0].node.get_our_node_id()], 100000);
 check_added_monitors!(nodes[1], 1);

 // Make sure everything is persisted as expected after close.

@@ -270,7 +270,7 @@ mod tests {
 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
-check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
+check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
 let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
 let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();

@@ -309,7 +309,7 @@ mod tests {
 let nodes = create_network(2, &node_cfgs, &node_chanmgrs);
 let chan = create_announced_chan_between_nodes(&nodes, 0, 1);
 nodes[1].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[0].node.get_our_node_id()).unwrap();
-check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
+check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
 let mut added_monitors = nodes[1].chain_monitor.added_monitors.lock().unwrap();
 let update_map = nodes[1].chain_monitor.latest_monitor_update_id.lock().unwrap();
 let update_id = update_map.get(&added_monitors[0].0.to_channel_id()).unwrap();
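
Across the test suites touched by this commit, check_closed_event! gains two extra arguments: the expected counterparty node ids and the expected channel capacity in sats, so the new event fields are asserted as well. Roughly what one such call checks, written out by hand; an illustrative equivalent (not the macro's actual expansion), assuming the functional-test harness context where nodes, Event and ClosureReason are in scope:

    // Hand-written approximation of
    // check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed,
    //     [nodes[1].node.get_our_node_id()], 100000);
    let events = nodes[0].node.get_and_clear_pending_events();
    assert_eq!(events.len(), 1);
    match &events[0] {
        Event::ChannelClosed { reason, counterparty_node_id, channel_capacity_sats, .. } => {
            assert_eq!(*reason, ClosureReason::HolderForceClosed);
            assert_eq!(*counterparty_node_id, Some(nodes[1].node.get_our_node_id()));
            assert_eq!(*channel_capacity_sats, Some(100_000));
        },
        _ => panic!("unexpected event"),
    }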

lightning/src/chain/chainmonitor.rs

+4 -2

@@ -966,7 +966,8 @@ mod tests {
 assert!(err.contains("ChannelMonitor storage failure")));
 check_added_monitors!(nodes[0], 2); // After the failure we generate a close-channel monitor update
 check_closed_broadcast!(nodes[0], true);
-check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
+check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() },
+[nodes[1].node.get_our_node_id()], 100000);

 // However, as the ChainMonitor is still waiting for the original persistence to complete,
 // it won't yet release the MonitorEvents.

@@ -1013,7 +1014,8 @@ mod tests {
 // ... however once we get events once, the channel will close, creating a channel-closed
 // ChannelMonitorUpdate.
 check_closed_broadcast!(nodes[0], true);
-check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() });
+check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "Failed to persist ChannelMonitor update during chain sync".to_string() },
+[nodes[1].node.get_our_node_id()], 100000);
 check_added_monitors!(nodes[0], 1);
 }
 }

lightning/src/chain/channelmonitor.rs

+2 -1

@@ -4276,7 +4276,8 @@ mod tests {
 assert!(err.contains("ChannelMonitor storage failure")));
 check_added_monitors!(nodes[1], 2); // After the failure we generate a close-channel monitor update
 check_closed_broadcast!(nodes[1], true);
-check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
+check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() },
+[nodes[0].node.get_our_node_id()], 100000);

 // Build a new ChannelMonitorUpdate which contains both the failing commitment tx update
 // and provides the claim preimages for the two pending HTLCs. The first update generates

lightning/src/events/mod.rs

+20 -3

@@ -767,7 +767,15 @@ pub enum Event {
 /// [`UserConfig::manually_accept_inbound_channels`]: crate::util::config::UserConfig::manually_accept_inbound_channels
 user_channel_id: u128,
 /// The reason the channel was closed.
-reason: ClosureReason
+reason: ClosureReason,
+/// Counterparty in the closed channel.
+///
+/// This field will be `None` for objects serialized prior to LDK 0.0.117.
+counterparty_node_id: Option<PublicKey>,
+/// Channel capacity of the closing channel (sats).
+///
+/// This field will be `None` for objects serialized prior to LDK 0.0.117.
+channel_capacity_sats: Option<u64>,
 },
 /// Used to indicate to the user that they can abandon the funding transaction and recycle the
 /// inputs for another purpose.

@@ -968,7 +976,9 @@ impl Writeable for Event {
 (5, outbound_amount_forwarded_msat, option),
 });
 },
-&Event::ChannelClosed { ref channel_id, ref user_channel_id, ref reason } => {
+&Event::ChannelClosed { ref channel_id, ref user_channel_id, ref reason,
+ref counterparty_node_id, ref channel_capacity_sats
+} => {
 9u8.write(writer)?;
 // `user_channel_id` used to be a single u64 value. In order to remain backwards
 // compatible with versions prior to 0.0.113, the u128 is serialized as two

@@ -980,6 +990,8 @@ impl Writeable for Event {
 (1, user_channel_id_low, required),
 (2, reason, required),
 (3, user_channel_id_high, required),
+(5, counterparty_node_id, option),
+(7, channel_capacity_sats, option),
 });
 },
 &Event::DiscardFunding { ref channel_id, ref transaction } => {

@@ -1264,11 +1276,15 @@ impl MaybeReadable for Event {
 let mut reason = UpgradableRequired(None);
 let mut user_channel_id_low_opt: Option<u64> = None;
 let mut user_channel_id_high_opt: Option<u64> = None;
+let mut counterparty_node_id = None;
+let mut channel_capacity_sats = None;
 read_tlv_fields!(reader, {
 (0, channel_id, required),
 (1, user_channel_id_low_opt, option),
 (2, reason, upgradable_required),
 (3, user_channel_id_high_opt, option),
+(5, counterparty_node_id, option),
+(7, channel_capacity_sats, option),
 });

 // `user_channel_id` used to be a single u64 value. In order to remain

@@ -1277,7 +1293,8 @@ impl MaybeReadable for Event {
 let user_channel_id = (user_channel_id_low_opt.unwrap_or(0) as u128) +
 ((user_channel_id_high_opt.unwrap_or(0) as u128) << 64);

-Ok(Some(Event::ChannelClosed { channel_id, user_channel_id, reason: _init_tlv_based_struct_field!(reason, upgradable_required) }))
+Ok(Some(Event::ChannelClosed { channel_id, user_channel_id, reason: _init_tlv_based_struct_field!(reason, upgradable_required),
+counterparty_node_id, channel_capacity_sats }))
 };
 f()
 },
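
Both new fields are written as optional TLVs with odd type numbers (5 and 7), so older readers can skip them and events serialized before these fields existed read back as `None`, which is what the updated doc comments promise. A rough round-trip sketch using the public Writeable/MaybeReadable traits; the helper function is illustrative and assumes the std feature:

    use lightning::events::Event;
    use lightning::util::ser::{MaybeReadable, Writeable};

    // Serialize an event and read it back; events from pre-0.0.117
    // serializations come back with counterparty_node_id and
    // channel_capacity_sats set to None.
    fn roundtrip(event: &Event) -> Option<Event> {
        let bytes = event.encode();
        let mut reader = std::io::Cursor::new(bytes);
        // MaybeReadable::read returns Ok(None) for event types this version
        // doesn't understand; known events come back as Ok(Some(..)).
        Event::read(&mut reader).ok().flatten()
    }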

lightning/src/ln/chanmon_update_fail_tests.rs

+14 -11

@@ -70,7 +70,8 @@ fn test_simple_monitor_permanent_update_fail() {
 // PaymentPathFailed event

 assert_eq!(nodes[0].node.list_channels().len(), 0);
-check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
+check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() },
+[nodes[1].node.get_our_node_id()], 100000);
 }

 #[test]

@@ -247,7 +248,7 @@ fn do_test_simple_monitor_temporary_update_fail(disconnect: bool) {
 // PaymentPathFailed event

 assert_eq!(nodes[0].node.list_channels().len(), 0);
-check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
+check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);
 }

 #[test]

@@ -1987,8 +1988,8 @@ fn do_during_funding_monitor_fail(confirm_a_first: bool, restore_b_before_conf:

 send_payment(&nodes[0], &[&nodes[1]], 8000000);
 close_channel(&nodes[0], &nodes[1], &channel_id, funding_tx, true);
-check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
-check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
+check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
 }

 #[test]

@@ -2188,7 +2189,7 @@ fn test_fail_htlc_on_broadcast_after_claim() {
 expect_payment_forwarded!(nodes[1], nodes[0], nodes[2], Some(1000), false, false);

 mine_transaction(&nodes[1], &bs_txn[0]);
-check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed);
+check_closed_event!(nodes[1], 1, ClosureReason::CommitmentTxConfirmed, [nodes[2].node.get_our_node_id()], 100000);
 check_closed_broadcast!(nodes[1], true);
 connect_blocks(&nodes[1], ANTI_REORG_DELAY - 1);
 check_added_monitors!(nodes[1], 1);

@@ -2666,8 +2667,8 @@ fn test_temporary_error_during_shutdown() {
 assert_eq!(txn_a, txn_b);
 assert_eq!(txn_a.len(), 1);
 check_spends!(txn_a[0], funding_tx);
-check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
-check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
+check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 100000);
+check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 100000);
 }

 #[test]

@@ -2696,7 +2697,8 @@ fn test_permanent_error_during_sending_shutdown() {
 if let MessageSendEvent::HandleError { .. } = msg_events[2] {} else { panic!(); }

 check_added_monitors!(nodes[0], 2);
-check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
+check_closed_event!(nodes[0], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() },
+[nodes[1].node.get_our_node_id()], 100000);
 }

 #[test]

@@ -2727,7 +2729,8 @@ fn test_permanent_error_during_handling_shutdown() {
 if let MessageSendEvent::HandleError { .. } = msg_events[2] {} else { panic!(); }

 check_added_monitors!(nodes[1], 2);
-check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() });
+check_closed_event!(nodes[1], 1, ClosureReason::ProcessingError { err: "ChannelMonitor storage failure".to_string() },
+[nodes[0].node.get_our_node_id()], 100000);
 }

 #[test]

@@ -2921,7 +2924,7 @@ fn do_test_outbound_reload_without_init_mon(use_0conf: bool) {
 nodes[0].chain_source.watched_outputs.lock().unwrap().clear();

 reload_node!(nodes[0], &nodes[0].node.encode(), &[], persister, new_chain_monitor, nodes_0_deserialized);
-check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer);
+check_closed_event!(nodes[0], 1, ClosureReason::DisconnectedPeer, [nodes[1].node.get_our_node_id()], 100000);
 assert!(nodes[0].node.list_channels().is_empty());
 }

@@ -3008,7 +3011,7 @@ fn do_test_inbound_reload_without_init_mon(use_0conf: bool, lock_commitment: boo

 reload_node!(nodes[1], &nodes[1].node.encode(), &[], persister, new_chain_monitor, nodes_1_deserialized);

-check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer);
+check_closed_event!(nodes[1], 1, ClosureReason::DisconnectedPeer, [nodes[0].node.get_our_node_id()], 100000);
 assert!(nodes[1].node.list_channels().is_empty());
 }
lightning/src/ln/channelmanager.rs

+28 -15

@@ -397,6 +397,7 @@ struct MsgHandleErrInternal {
 err: msgs::LightningError,
 chan_id: Option<([u8; 32], u128)>, // If Some a channel of ours has been closed
 shutdown_finish: Option<(ShutdownResult, Option<msgs::ChannelUpdate>)>,
+channel_capacity: Option<u64>,
 }
 impl MsgHandleErrInternal {
 #[inline]

@@ -413,14 +414,15 @@ impl MsgHandleErrInternal {
 },
 chan_id: None,
 shutdown_finish: None,
+channel_capacity: None,
 }
 }
 #[inline]
 fn from_no_close(err: msgs::LightningError) -> Self {
-Self { err, chan_id: None, shutdown_finish: None }
+Self { err, chan_id: None, shutdown_finish: None, channel_capacity: None }
 }
 #[inline]
-fn from_finish_shutdown(err: String, channel_id: [u8; 32], user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>) -> Self {
+fn from_finish_shutdown(err: String, channel_id: [u8; 32], user_channel_id: u128, shutdown_res: ShutdownResult, channel_update: Option<msgs::ChannelUpdate>, channel_capacity: u64) -> Self {
 Self {
 err: LightningError {
 err: err.clone(),

@@ -433,6 +435,7 @@ impl MsgHandleErrInternal {
 },
 chan_id: Some((channel_id, user_channel_id)),
 shutdown_finish: Some((shutdown_res, channel_update)),
+channel_capacity: Some(channel_capacity)
 }
 }
 #[inline]

@@ -465,6 +468,7 @@ impl MsgHandleErrInternal {
 },
 chan_id: None,
 shutdown_finish: None,
+channel_capacity: None,
 }
 }
 }

@@ -1680,7 +1684,7 @@ macro_rules! handle_error {

 match $internal {
 Ok(msg) => Ok(msg),
-Err(MsgHandleErrInternal { err, chan_id, shutdown_finish }) => {
+Err(MsgHandleErrInternal { err, chan_id, shutdown_finish, channel_capacity }) => {
 let mut msg_events = Vec::with_capacity(2);

 if let Some((shutdown_res, update_option)) = shutdown_finish {

@@ -1693,7 +1697,9 @@
 if let Some((channel_id, user_channel_id)) = chan_id {
 $self.pending_events.lock().unwrap().push_back((events::Event::ChannelClosed {
 channel_id, user_channel_id,
-reason: ClosureReason::ProcessingError { err: err.err.clone() }
+reason: ClosureReason::ProcessingError { err: err.err.clone() },
+counterparty_node_id: Some($counterparty_node_id),
+channel_capacity_sats: channel_capacity,
 }, None));
 }
 }

@@ -1766,7 +1772,7 @@ macro_rules! convert_chan_err {
 update_maps_on_chan_removal!($self, &$channel.context);
 let shutdown_res = $channel.context.force_shutdown(true);
 (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel.context.get_user_id(),
-shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok()))
+shutdown_res, $self.get_channel_update_for_broadcast(&$channel).ok(), $channel.context.get_value_satoshis()))
 },
 }
 };

@@ -1779,7 +1785,7 @@
 update_maps_on_chan_removal!($self, &$channel_context);
 let shutdown_res = $channel_context.force_shutdown(false);
 (true, MsgHandleErrInternal::from_finish_shutdown(msg, *$channel_id, $channel_context.get_user_id(),
-shutdown_res, None))
+shutdown_res, None, $channel_context.get_value_satoshis()))
 },
 }
 }

@@ -1958,7 +1964,7 @@ macro_rules! handle_new_monitor_update {
 let res = Err(MsgHandleErrInternal::from_finish_shutdown(
 "ChannelMonitor storage failure".to_owned(), $chan.context.channel_id(),
 $chan.context.get_user_id(), $chan.context.force_shutdown(false),
-$self.get_channel_update_for_broadcast(&$chan).ok()));
+$self.get_channel_update_for_broadcast(&$chan).ok(), $chan.context.get_value_satoshis()));
 $remove;
 res
 },

@@ -2392,7 +2398,9 @@ where
 pending_events_lock.push_back((events::Event::ChannelClosed {
 channel_id: context.channel_id(),
 user_channel_id: context.get_user_id(),
-reason: closure_reason
+reason: closure_reason,
+counterparty_node_id: Some(context.get_counterparty_node_id()),
+channel_capacity_sats: Some(context.get_value_satoshis()),
 }, None));
 }

@@ -3408,7 +3416,8 @@ where
 let channel_id = chan.context.channel_id();
 let user_id = chan.context.get_user_id();
 let shutdown_res = chan.context.force_shutdown(false);
-(chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, user_id, shutdown_res, None))
+let channel_capacity = chan.context.get_value_satoshis();
+(chan, MsgHandleErrInternal::from_finish_shutdown(msg, channel_id, user_id, shutdown_res, None, channel_capacity))
 } else { unreachable!(); });
 match funding_res {
 Ok((chan, funding_msg)) => (chan, funding_msg),

@@ -5492,7 +5501,7 @@ where
 let user_id = inbound_chan.context.get_user_id();
 let shutdown_res = inbound_chan.context.force_shutdown(false);
 return Err(MsgHandleErrInternal::from_finish_shutdown(format!("{}", err),
-msg.temporary_channel_id, user_id, shutdown_res, None));
+msg.temporary_channel_id, user_id, shutdown_res, None, inbound_chan.context.get_value_satoshis()));
 },
 }
 },

@@ -8442,7 +8451,9 @@ where
 channel_closures.push_back((events::Event::ChannelClosed {
 channel_id: channel.context.channel_id(),
 user_channel_id: channel.context.get_user_id(),
-reason: ClosureReason::OutdatedChannelManager
+reason: ClosureReason::OutdatedChannelManager,
+counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
+channel_capacity_sats: Some(channel.context.get_value_satoshis()),
 }, None));
 for (channel_htlc_source, payment_hash) in channel.inflight_htlc_sources() {
 let mut found_htlc = false;

@@ -8494,6 +8505,8 @@ where
 channel_id: channel.context.channel_id(),
 user_channel_id: channel.context.get_user_id(),
 reason: ClosureReason::DisconnectedPeer,
+counterparty_node_id: Some(channel.context.get_counterparty_node_id()),
+channel_capacity_sats: Some(channel.context.get_value_satoshis()),
 }, None));
 } else {
 log_error!(args.logger, "Missing ChannelMonitor for channel {} needed by ChannelManager.", log_bytes!(channel.context.channel_id()));

@@ -9710,7 +9723,7 @@ mod tests {
 nodes[0].node.force_close_broadcasting_latest_txn(&chan.2, &nodes[1].node.get_our_node_id()).unwrap();
 check_closed_broadcast!(nodes[0], true);
 check_added_monitors!(nodes[0], 1);
-check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed);
+check_closed_event!(nodes[0], 1, ClosureReason::HolderForceClosed, [nodes[1].node.get_our_node_id()], 100000);

 {
 // Assert that nodes[1] is awaiting removal for nodes[0] once nodes[1] has been

@@ -9873,8 +9886,8 @@ mod tests {
 }
 let (_nodes_1_update, _none) = get_closing_signed_broadcast!(nodes[1].node, nodes[0].node.get_our_node_id());

-check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure);
-check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure);
+check_closed_event!(nodes[0], 1, ClosureReason::CooperativeClosure, [nodes[1].node.get_our_node_id()], 1000000);
+check_closed_event!(nodes[1], 1, ClosureReason::CooperativeClosure, [nodes[0].node.get_our_node_id()], 1000000);
 }

 fn check_not_connected_to_peer_error<T>(res_err: Result<T, APIError>, expected_public_key: PublicKey) {

@@ -10267,7 +10280,7 @@ mod tests {
 let open_channel_msg = get_event_msg!(nodes[0], MessageSendEvent::SendOpenChannel, nodes[1].node.get_our_node_id());
 assert!(!open_channel_msg.channel_type.unwrap().supports_anchors_zero_fee_htlc_tx());

-check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed);
+check_closed_event!(nodes[1], 1, ClosureReason::HolderForceClosed, [nodes[0].node.get_our_node_id()], 100000);
 }

 #[test]