@@ -511,7 +511,7 @@ static struct sk_buff *xsk_build_skb(struct xdp_sock *xs,
 	return skb;
 }
 
-static int xsk_generic_xmit(struct sock *sk)
+static int __xsk_generic_xmit(struct sock *sk)
 {
 	struct xdp_sock *xs = xdp_sk(sk);
 	u32 max_batch = TX_BATCH_SIZE;
@@ -594,22 +594,13 @@ static int xsk_generic_xmit(struct sock *sk)
 	return err;
 }
 
-static int xsk_xmit(struct sock *sk)
+static int xsk_generic_xmit(struct sock *sk)
 {
-	struct xdp_sock *xs = xdp_sk(sk);
 	int ret;
 
-	if (unlikely(!(xs->dev->flags & IFF_UP)))
-		return -ENETDOWN;
-	if (unlikely(!xs->tx))
-		return -ENOBUFS;
-
-	if (xs->zc)
-		return xsk_wakeup(xs, XDP_WAKEUP_TX);
-
 	/* Drop the RCU lock since the SKB path might sleep. */
 	rcu_read_unlock();
-	ret = xsk_generic_xmit(sk);
+	ret = __xsk_generic_xmit(sk);
 	/* Reaquire RCU lock before going into common code. */
 	rcu_read_lock();
 
@@ -627,17 +618,31 @@ static bool xsk_no_wakeup(struct sock *sk)
 #endif
 }
 
+static int xsk_check_common(struct xdp_sock *xs)
+{
+	if (unlikely(!xsk_is_bound(xs)))
+		return -ENXIO;
+	if (unlikely(!(xs->dev->flags & IFF_UP)))
+		return -ENETDOWN;
+
+	return 0;
+}
+
 static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
 {
 	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
 	struct sock *sk = sock->sk;
 	struct xdp_sock *xs = xdp_sk(sk);
 	struct xsk_buff_pool *pool;
+	int err;
 
-	if (unlikely(!xsk_is_bound(xs)))
-		return -ENXIO;
+	err = xsk_check_common(xs);
+	if (err)
+		return err;
 	if (unlikely(need_wait))
 		return -EOPNOTSUPP;
+	if (unlikely(!xs->tx))
+		return -ENOBUFS;
 
 	if (sk_can_busy_loop(sk)) {
 		if (xs->zc)
@@ -649,8 +654,11 @@ static int __xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len
 		return 0;
 
 	pool = xs->pool;
-	if (pool->cached_need_wakeup & XDP_WAKEUP_TX)
-		return xsk_xmit(sk);
+	if (pool->cached_need_wakeup & XDP_WAKEUP_TX) {
+		if (xs->zc)
+			return xsk_wakeup(xs, XDP_WAKEUP_TX);
+		return xsk_generic_xmit(sk);
+	}
 	return 0;
 }
 
@@ -670,11 +678,11 @@ static int __xsk_recvmsg(struct socket *sock, struct msghdr *m, size_t len, int
 	bool need_wait = !(flags & MSG_DONTWAIT);
 	struct sock *sk = sock->sk;
 	struct xdp_sock *xs = xdp_sk(sk);
+	int err;
 
-	if (unlikely(!xsk_is_bound(xs)))
-		return -ENXIO;
-	if (unlikely(!(xs->dev->flags & IFF_UP)))
-		return -ENETDOWN;
+	err = xsk_check_common(xs);
+	if (err)
+		return err;
 	if (unlikely(!xs->rx))
 		return -ENOBUFS;
 	if (unlikely(need_wait))
@@ -713,21 +721,20 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
 	sock_poll_wait(file, sock, wait);
 
 	rcu_read_lock();
-	if (unlikely(!xsk_is_bound(xs))) {
-		rcu_read_unlock();
-		return mask;
-	}
+	if (xsk_check_common(xs))
+		goto skip_tx;
 
 	pool = xs->pool;
 
 	if (pool->cached_need_wakeup) {
 		if (xs->zc)
 			xsk_wakeup(xs, pool->cached_need_wakeup);
-		else
+		else if (xs->tx)
 			/* Poll needs to drive Tx also in copy mode */
-			xsk_xmit(sk);
+			xsk_generic_xmit(sk);
 	}
 
+skip_tx:
	if (xs->rx && !xskq_prod_is_empty(xs->rx))
 		mask |= EPOLLIN | EPOLLRDNORM;
 	if (xs->tx && xsk_tx_writeable(xs))
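If it helps to see the resulting Tx ordering in one place, below is a minimal userspace sketch of the sendmsg dispatch this patch produces: xsk_check_common() now holds the bind and IFF_UP checks shared by __xsk_sendmsg(), __xsk_recvmsg() and xsk_poll(), the Tx-ring check stays in the sendmsg path, and the zero-copy versus copy-mode split happens at the call site instead of inside the old xsk_xmit(). This is an illustration only, not kernel code: the struct fields and helper bodies are simplified stand-ins, and the MSG_DONTWAIT and need_wakeup gating are omitted.

/* Hypothetical userspace model of the refactored Tx dispatch; errno
 * constants stand in for the kernel's -ENXIO/-ENETDOWN/-ENOBUFS returns.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct xdp_sock {
	bool bound;	/* models xsk_is_bound(xs) */
	bool dev_up;	/* models xs->dev->flags & IFF_UP */
	bool has_tx;	/* models xs->tx */
	bool zc;	/* zero-copy mode */
};

/* Mirrors the new xsk_check_common(): bind and link state only. */
static int xsk_check_common(const struct xdp_sock *xs)
{
	if (!xs->bound)
		return -ENXIO;
	if (!xs->dev_up)
		return -ENETDOWN;
	return 0;
}

/* Stand-ins for the two Tx backends chosen in __xsk_sendmsg(). */
static int xsk_wakeup(const struct xdp_sock *xs)
{
	(void)xs;
	puts("zero-copy: kick the driver");
	return 0;
}

static int xsk_generic_xmit(const struct xdp_sock *xs)
{
	(void)xs;
	puts("copy mode: build skbs");
	return 0;
}

/* Shape of the sendmsg path after the patch: common checks first,
 * then the Tx-ring check, then the zc/copy split at the call site.
 */
static int sendmsg_path(const struct xdp_sock *xs)
{
	int err = xsk_check_common(xs);

	if (err)
		return err;
	if (!xs->has_tx)
		return -ENOBUFS;
	return xs->zc ? xsk_wakeup(xs) : xsk_generic_xmit(xs);
}

int main(void)
{
	struct xdp_sock copy_sock = { .bound = true, .dev_up = true, .has_tx = true, .zc = false };
	struct xdp_sock down_sock = { .bound = true, .dev_up = false, .has_tx = true, .zc = true };

	printf("copy_sock -> %d\n", sendmsg_path(&copy_sock));	/* 0, copy path */
	printf("down_sock -> %d\n", sendmsg_path(&down_sock));	/* -ENETDOWN */
	return 0;
}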