@@ -453,19 +453,27 @@ static void iucv_sever_path(struct sock *sk, int with_user_data)
 	}
 }
 
-/* Send FIN through an IUCV socket for HIPER transport */
+/* Send controlling flags through an IUCV socket for HIPER transport */
 static int iucv_send_ctrl(struct sock *sk, u8 flags)
 {
 	int err = 0;
 	int blen;
 	struct sk_buff *skb;
+	u8 shutdown = 0;
 
 	blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
+	if (sk->sk_shutdown & SEND_SHUTDOWN) {
+		/* controlling flags should be sent anyway */
+		shutdown = sk->sk_shutdown;
+		sk->sk_shutdown &= RCV_SHUTDOWN;
+	}
 	skb = sock_alloc_send_skb(sk, blen, 1, &err);
 	if (skb) {
 		skb_reserve(skb, blen);
 		err = afiucv_hs_send(NULL, sk, skb, flags);
 	}
+	if (shutdown)
+		sk->sk_shutdown = shutdown;
 	return err;
 }
 
@@ -1315,8 +1323,13 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,
 	}
 
 	IUCV_SKB_CB(skb)->offset = 0;
-	if (sock_queue_rcv_skb(sk, skb))
-		skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);
+	if (sk_filter(sk, skb)) {
+		atomic_inc(&sk->sk_drops);	/* skb rejected by filter */
+		kfree_skb(skb);
+		return;
+	}
+	if (__sock_queue_rcv_skb(sk, skb))	/* handle rcv queue full */
+		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
 }
 
 /* iucv_process_message_q() - Process outstanding IUCV messages
@@ -1430,13 +1443,13 @@ static int iucv_sock_recvmsg(struct socket *sock, struct msghdr *msg,
 		rskb = skb_dequeue(&iucv->backlog_skb_q);
 		while (rskb) {
 			IUCV_SKB_CB(rskb)->offset = 0;
-			if (sock_queue_rcv_skb(sk, rskb)) {
+			if (__sock_queue_rcv_skb(sk, rskb)) {
+				/* handle rcv queue full */
 				skb_queue_head(&iucv->backlog_skb_q,
 						rskb);
 				break;
-			} else {
-				rskb = skb_dequeue(&iucv->backlog_skb_q);
 			}
+			rskb = skb_dequeue(&iucv->backlog_skb_q);
 		}
 		if (skb_queue_empty(&iucv->backlog_skb_q)) {
 			if (!list_empty(&iucv->message_q.list))
@@ -2116,12 +2129,17 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)
 	skb_reset_transport_header(skb);
 	skb_reset_network_header(skb);
 	IUCV_SKB_CB(skb)->offset = 0;
+	if (sk_filter(sk, skb)) {
+		atomic_inc(&sk->sk_drops);	/* skb rejected by filter */
+		kfree_skb(skb);
+		return NET_RX_SUCCESS;
+	}
+
 	spin_lock(&iucv->message_q.lock);
 	if (skb_queue_empty(&iucv->backlog_skb_q)) {
-		if (sock_queue_rcv_skb(sk, skb)) {
+		if (__sock_queue_rcv_skb(sk, skb))
 			/* handle rcv queue full */
 			skb_queue_tail(&iucv->backlog_skb_q, skb);
-		}
 	} else
 		skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, skb);
 	spin_unlock(&iucv->message_q.lock);
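
Note (not part of the patch): all of the receive-path hunks above apply the same pattern, i.e. run the attached socket filter first, account a rejected skb in sk->sk_drops, and park the skb on the per-socket backlog queue when __sock_queue_rcv_skb() reports a full receive buffer. A minimal sketch of that pattern as a standalone helper follows; the helper name iucv_deliver_or_backlog() and its backlog parameter are invented for illustration and do not appear in af_iucv.c.

/* Illustrative sketch only, assuming kernel context; not part of the patch. */
#include <net/sock.h>
#include <linux/skbuff.h>
#include <linux/filter.h>

static void iucv_deliver_or_backlog(struct sock *sk, struct sk_buff *skb,
				    struct sk_buff_head *backlog)
{
	/* Run the attached socket filter before any queueing; a rejected
	 * skb is freed and accounted in sk->sk_drops. */
	if (sk_filter(sk, skb)) {
		atomic_inc(&sk->sk_drops);
		kfree_skb(skb);
		return;
	}
	/* __sock_queue_rcv_skb() queues without re-running the filter and
	 * fails when the receive buffer is full; in that case the skb is
	 * parked on the per-socket backlog queue instead of being dropped,
	 * to be retried later (e.g. from the recvmsg path). */
	if (__sock_queue_rcv_skb(sk, skb))
		skb_queue_tail(backlog, skb);
}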