@@ -1361,62 +1361,112 @@ mod tests {
 		}
 	}
 
+	macro_rules! do_test_not_pruning_network_graph_until_graph_sync_completion {
+		($nodes: expr, $receive: expr, $sleep: expr) => {
+			let features = ChannelFeatures::empty();
+			$nodes[0].network_graph.add_channel_from_partial_announcement(
+				42, 53, features, $nodes[0].node.get_our_node_id(), $nodes[1].node.get_our_node_id()
+			).expect("Failed to update channel from partial announcement");
+			let original_graph_description = $nodes[0].network_graph.to_string();
+			assert!(original_graph_description.contains("42: features: 0000, node_one:"));
+			assert_eq!($nodes[0].network_graph.read_only().channels().len(), 1);
+
+			loop {
+				$sleep;
+				let log_entries = $nodes[0].logger.lines.lock().unwrap();
+				let loop_counter = "Calling ChannelManager's timer_tick_occurred".to_string();
+				if *log_entries.get(&("lightning_background_processor".to_string(), loop_counter))
+					.unwrap_or(&0) > 1
+				{
+					// Wait until the loop has gone around at least twice.
+					break
+				}
+			}
+
+			let initialization_input = vec![
+				76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
+				79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
+				0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
+				187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
+				157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
+				88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
+				204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
+				181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
+				110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
+				76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
+				226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 2, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
+				0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0,
+				0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
+			];
+			$nodes[0].rapid_gossip_sync.update_network_graph_no_std(&initialization_input[..], Some(1642291930)).unwrap();
+
+			// this should have added two channels
+			assert_eq!($nodes[0].network_graph.read_only().channels().len(), 3);
+
+			$receive.expect("Network graph not pruned within deadline");
+
+			// all channels should now be pruned
+			assert_eq!($nodes[0].network_graph.read_only().channels().len(), 0);
+		}
+	}
+
 	#[test]
 	fn test_not_pruning_network_graph_until_graph_sync_completion() {
+		let (sender, receiver) = std::sync::mpsc::sync_channel(1);
+
 		let nodes = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion".to_string());
 		let data_dir = nodes[0].persister.get_data_dir();
-		let (sender, receiver) = std::sync::mpsc::sync_channel(1);
 		let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
-		let network_graph = nodes[0].network_graph.clone();
-		let features = ChannelFeatures::empty();
-		network_graph.add_channel_from_partial_announcement(42, 53, features, nodes[0].node.get_our_node_id(), nodes[1].node.get_our_node_id())
-			.expect("Failed to update channel from partial announcement");
-		let original_graph_description = network_graph.to_string();
-		assert!(original_graph_description.contains("42: features: 0000, node_one:"));
-		assert_eq!(network_graph.read_only().channels().len(), 1);
 
 		let event_handler = |_: _| {};
 		let background_processor = BackgroundProcessor::start(persister, event_handler, nodes[0].chain_monitor.clone(), nodes[0].node.clone(), nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(), Some(nodes[0].scorer.clone()));
 
-		loop {
-			let log_entries = nodes[0].logger.lines.lock().unwrap();
-			let loop_counter = "Calling ChannelManager's timer_tick_occurred".to_string();
-			if *log_entries.get(&("lightning_background_processor".to_string(), loop_counter))
-				.unwrap_or(&0) > 1
-			{
-				// Wait until the loop has gone around at least twice.
-				break
-			}
-		}
-
-		let initialization_input = vec![
-			76, 68, 75, 1, 111, 226, 140, 10, 182, 241, 179, 114, 193, 166, 162, 70, 174, 99, 247,
-			79, 147, 30, 131, 101, 225, 90, 8, 156, 104, 214, 25, 0, 0, 0, 0, 0, 97, 227, 98, 218,
-			0, 0, 0, 4, 2, 22, 7, 207, 206, 25, 164, 197, 231, 230, 231, 56, 102, 61, 250, 251,
-			187, 172, 38, 46, 79, 247, 108, 44, 155, 48, 219, 238, 252, 53, 192, 6, 67, 2, 36, 125,
-			157, 176, 223, 175, 234, 116, 94, 248, 201, 225, 97, 235, 50, 47, 115, 172, 63, 136,
-			88, 216, 115, 11, 111, 217, 114, 84, 116, 124, 231, 107, 2, 158, 1, 242, 121, 152, 106,
-			204, 131, 186, 35, 93, 70, 216, 10, 237, 224, 183, 89, 95, 65, 3, 83, 185, 58, 138,
-			181, 64, 187, 103, 127, 68, 50, 2, 201, 19, 17, 138, 136, 149, 185, 226, 156, 137, 175,
-			110, 32, 237, 0, 217, 90, 31, 100, 228, 149, 46, 219, 175, 168, 77, 4, 143, 38, 128,
-			76, 97, 0, 0, 0, 2, 0, 0, 255, 8, 153, 192, 0, 2, 27, 0, 0, 0, 1, 0, 0, 255, 2, 68,
-			226, 0, 6, 11, 0, 1, 2, 3, 0, 0, 0, 2, 0, 40, 0, 0, 0, 0, 0, 0, 3, 232, 0, 0, 3, 232,
-			0, 0, 0, 1, 0, 0, 0, 0, 58, 85, 116, 216, 255, 8, 153, 192, 0, 2, 27, 0, 0, 25, 0, 0,
-			0, 1, 0, 0, 0, 125, 255, 2, 68, 226, 0, 6, 11, 0, 1, 5, 0, 0, 0, 0, 29, 129, 25, 192,
-		];
-		nodes[0].rapid_gossip_sync.update_network_graph_no_std(&initialization_input[..], Some(1642291930)).unwrap();
-
-		// this should have added two channels
-		assert_eq!(network_graph.read_only().channels().len(), 3);
-
-		receiver
-			.recv_timeout(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER * 5))
-			.expect("Network graph not pruned within deadline");
+		do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes,
+			receiver.recv_timeout(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER * 5)),
+			std::thread::sleep(Duration::from_millis(1)));
 
 		background_processor.stop().unwrap();
+	}
+
+	#[tokio::test]
+	#[cfg(feature = "futures")]
+	async fn test_not_pruning_network_graph_until_graph_sync_completion_async() {
+		let (sender, receiver) = std::sync::mpsc::sync_channel(1);
+
+		let nodes = create_nodes(2, "test_not_pruning_network_graph_until_graph_sync_completion_async".to_string());
+		let data_dir = nodes[0].persister.get_data_dir();
+		let persister = Arc::new(Persister::new(data_dir).with_graph_persistence_notifier(sender));
 
-		// all channels should now be pruned
-		assert_eq!(network_graph.read_only().channels().len(), 0);
+		let (exit_sender, exit_receiver) = tokio::sync::watch::channel(());
+		let bp_future = super::process_events_async(
+			persister, |_: _| {async {}}, nodes[0].chain_monitor.clone(), nodes[0].node.clone(),
+			nodes[0].rapid_gossip_sync(), nodes[0].peer_manager.clone(), nodes[0].logger.clone(),
+			Some(nodes[0].scorer.clone()), move |dur: Duration| {
+				let mut exit_receiver = exit_receiver.clone();
+				Box::pin(async move {
+					tokio::select! {
+						_ = tokio::time::sleep(dur) => false,
+						_ = exit_receiver.changed() => true,
+					}
+				})
+			}, false,
+		);
+		// TODO: Drop _local and simply spawn after #2003
+		let local_set = tokio::task::LocalSet::new();
+		local_set.spawn_local(bp_future);
+		local_set.spawn_local(async move {
+			do_test_not_pruning_network_graph_until_graph_sync_completion!(nodes, {
+				let mut i = 0;
+				loop {
+					tokio::time::sleep(Duration::from_secs(super::FIRST_NETWORK_PRUNE_TIMER)).await;
+					if let Ok(()) = receiver.try_recv() { break Ok::<(), ()>(()); }
+					assert!(i < 5);
+					i += 1;
+				}
+			}, tokio::time::sleep(Duration::from_millis(1)).await);
+			exit_sender.send(()).unwrap();
+		});
+		local_set.await;
 	}
 
 	macro_rules! do_test_payment_path_scoring {
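The test body above is shared between the blocking and async tests by passing the waiting primitives into the macro as expressions: the sync caller blocks on `recv_timeout` and `std::thread::sleep`, while the async caller polls with `try_recv` between `tokio::time::sleep(..).await` calls so the single-threaded runtime is never blocked. A minimal self-contained sketch of the same pattern, with hypothetical names (`do_test_wait_for_signal`, `wait_for_signal_sync`, `wait_for_signal_async`) rather than the crate's own macro, assuming a `tokio` dev-dependency with the `macros`, `rt`, and `time` features:

```rust
use std::sync::mpsc;
use std::time::Duration;

// Hypothetical macro: the body only needs `$receive` to yield a `Result` and
// `$sleep` to pause the current thread or task, so the same assertions can
// serve both a sync and an async test.
macro_rules! do_test_wait_for_signal {
	($receive: expr, $sleep: expr) => {
		// Give the producer a chance to run before checking for the signal.
		$sleep;
		$receive.expect("signal not delivered within deadline");
	}
}

#[test]
fn wait_for_signal_sync() {
	let (sender, receiver) = mpsc::sync_channel(1);
	std::thread::spawn(move || sender.send(()).unwrap());
	// Blocking variant: sleep on the thread, then block on recv_timeout.
	do_test_wait_for_signal!(
		receiver.recv_timeout(Duration::from_secs(5)),
		std::thread::sleep(Duration::from_millis(1)));
}

#[tokio::test]
async fn wait_for_signal_async() {
	let (sender, receiver) = mpsc::sync_channel(1);
	sender.send(()).unwrap();
	// Async variant: yield to the runtime, then poll without blocking it.
	do_test_wait_for_signal!(
		{ tokio::time::sleep(Duration::from_millis(10)).await; receiver.try_recv() },
		tokio::time::sleep(Duration::from_millis(1)).await);
}
```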