@@ -763,6 +763,85 @@ void efx_remove_channels(struct efx_nic *efx)
 	kfree(efx->xdp_tx_queues);
 }
 
+static int efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
+				struct efx_tx_queue *tx_queue)
+{
+	if (xdp_queue_number >= efx->xdp_tx_queue_count)
+		return -EINVAL;
+
+	netif_dbg(efx, drv, efx->net_dev,
+		  "Channel %u TXQ %u is XDP %u, HW %u\n",
+		  tx_queue->channel->channel, tx_queue->label,
+		  xdp_queue_number, tx_queue->queue);
+	efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+	return 0;
+}
+
+static void efx_set_xdp_channels(struct efx_nic *efx)
+{
+	struct efx_tx_queue *tx_queue;
+	struct efx_channel *channel;
+	unsigned int next_queue = 0;
+	int xdp_queue_number = 0;
+	int rc;
+
+	/* We need to mark which channels really have RX and TX
+	 * queues, and adjust the TX queue numbers if we have separate
+	 * RX-only and TX-only channels.
+	 */
+	efx_for_each_channel(channel, efx) {
+		if (channel->channel < efx->tx_channel_offset)
+			continue;
+
+		if (efx_channel_is_xdp_tx(channel)) {
+			efx_for_each_channel_tx_queue(tx_queue, channel) {
+				tx_queue->queue = next_queue++;
+				rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
+							  tx_queue);
+				if (rc == 0)
+					xdp_queue_number++;
+			}
+		} else {
+			efx_for_each_channel_tx_queue(tx_queue, channel) {
+				tx_queue->queue = next_queue++;
+				netif_dbg(efx, drv, efx->net_dev,
+					  "Channel %u TXQ %u is HW %u\n",
+					  channel->channel, tx_queue->label,
+					  tx_queue->queue);
+			}
+
+			/* If XDP is borrowing queues from net stack, it must
+			 * use the queue with no csum offload, which is the
+			 * first one of the channel
+			 * (note: tx_queue_by_type is not initialized yet)
+			 */
+			if (efx->xdp_txq_queues_mode ==
+			    EFX_XDP_TX_QUEUES_BORROWED) {
+				tx_queue = &channel->tx_queue[0];
+				rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
+							  tx_queue);
+				if (rc == 0)
+					xdp_queue_number++;
+			}
+		}
+	}
+	WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
+		xdp_queue_number != efx->xdp_tx_queue_count);
+	WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
+		xdp_queue_number > efx->xdp_tx_queue_count);
+
+	/* If we have more CPUs than assigned XDP TX queues, assign the already
+	 * existing queues to the exceeding CPUs
+	 */
+	next_queue = 0;
+	while (xdp_queue_number < efx->xdp_tx_queue_count) {
+		tx_queue = efx->xdp_tx_queues[next_queue++];
+		rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+		if (rc == 0)
+			xdp_queue_number++;
+	}
+}
+
 int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
 {
 	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
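The tail loop of efx_set_xdp_channels() above is the subtle part: efx->xdp_tx_queues has one slot per possible transmitting CPU, and when there are fewer real XDP TX queues than slots (any mode other than EFX_XDP_TX_QUEUES_DEDICATED, as the two WARN_ONs encode), the remaining slots are filled by re-reading entries that were already assigned, so the extra CPUs share queues round-robin. Below is a minimal userspace sketch of just that wrap-around assignment; the plain ints and the counts (8 CPUs, 3 queues) are hypothetical stand-ins for the driver's struct efx_tx_queue pointers.

```c
#include <stdio.h>

int main(void)
{
	int xdp_tx_queues[8];		/* one slot per possible CPU */
	int n_real = 3;			/* real XDP TX queues available */
	int i, next = 0;

	/* First pass: one slot per real queue, as the channel loop does. */
	for (i = 0; i < n_real; i++)
		xdp_tx_queues[i] = i;

	/* Wrap-around pass: reuse already-assigned entries for extra CPUs. */
	for (i = n_real; i < 8; i++)
		xdp_tx_queues[i] = xdp_tx_queues[next++];

	for (i = 0; i < 8; i++)		/* prints 0 1 2 0 1 2 0 1 */
		printf("cpu %d -> queue %d\n", i, xdp_tx_queues[i]);
	return 0;
}
```

The EFX_XDP_TX_QUEUES_BORROWED branch follows the same reuse idea in the other direction: when XDP borrows queues from the net stack, it takes channel->tx_queue[0], the queue without checksum offload, because tx_queue_by_type is not initialized at this point.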
@@ -837,6 +916,7 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
 		efx_init_napi_channel(efx->channel[i]);
 	}
 
+	efx_set_xdp_channels(efx);
 out:
 	/* Destroy unused channel structures */
 	for (i = 0; i < efx->n_channels; i++) {
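The one-line hunk above is the behavioural change in efx_realloc_channels(): reallocating channels replaces the channel and TX queue structures, so struct efx_tx_queue pointers cached in efx->xdp_tx_queues before the reallocation would be left pointing at the old structures; re-running efx_set_xdp_channels() after the new channels' NAPI setup rebuilds the mapping. That reading of the motivation is inferred from the diff rather than stated in it. A tiny standalone C sketch of the generic hazard, with a hypothetical struct queue standing in for the driver types:

```c
#include <stdlib.h>

struct queue { int hw; };

int main(void)
{
	struct queue *queues = calloc(4, sizeof(*queues));
	struct queue *cached;
	struct queue *bigger;

	if (!queues)
		return 1;
	cached = &queues[0];	/* like an entry of efx->xdp_tx_queues */

	bigger = realloc(queues, 8 * sizeof(*queues));	/* "realloc channels" */
	if (!bigger) {
		free(queues);
		return 1;
	}
	queues = bigger;

	/* 'cached' may now dangle; it must be re-derived from 'queues',
	 * just as efx_realloc_channels() now re-runs efx_set_xdp_channels().
	 */
	cached = &queues[0];
	(void)cached;
	free(queues);
	return 0;
}
```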
@@ -872,26 +952,9 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
 	goto out;
 }
 
-static inline int
-efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
-		     struct efx_tx_queue *tx_queue)
-{
-	if (xdp_queue_number >= efx->xdp_tx_queue_count)
-		return -EINVAL;
-
-	netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
-		  tx_queue->channel->channel, tx_queue->label,
-		  xdp_queue_number, tx_queue->queue);
-	efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
-	return 0;
-}
-
 int efx_set_channels(struct efx_nic *efx)
 {
-	struct efx_tx_queue *tx_queue;
 	struct efx_channel *channel;
-	unsigned int next_queue = 0;
-	int xdp_queue_number;
 	int rc;
 
 	efx->tx_channel_offset =
@@ -909,61 +972,14 @@ int efx_set_channels(struct efx_nic *efx)
 			return -ENOMEM;
 	}
 
-	/* We need to mark which channels really have RX and TX
-	 * queues, and adjust the TX queue numbers if we have separate
-	 * RX-only and TX-only channels.
-	 */
-	xdp_queue_number = 0;
 	efx_for_each_channel(channel, efx) {
 		if (channel->channel < efx->n_rx_channels)
 			channel->rx_queue.core_index = channel->channel;
 		else
 			channel->rx_queue.core_index = -1;
-
-		if (channel->channel >= efx->tx_channel_offset) {
-			if (efx_channel_is_xdp_tx(channel)) {
-				efx_for_each_channel_tx_queue(tx_queue, channel) {
-					tx_queue->queue = next_queue++;
-					rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
-					if (rc == 0)
-						xdp_queue_number++;
-				}
-			} else {
-				efx_for_each_channel_tx_queue(tx_queue, channel) {
-					tx_queue->queue = next_queue++;
-					netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is HW %u\n",
-						  channel->channel, tx_queue->label,
-						  tx_queue->queue);
-				}
-
-				/* If XDP is borrowing queues from net stack, it must use the queue
-				 * with no csum offload, which is the first one of the channel
-				 * (note: channel->tx_queue_by_type is not initialized yet)
-				 */
-				if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
-					tx_queue = &channel->tx_queue[0];
-					rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
-					if (rc == 0)
-						xdp_queue_number++;
-				}
-			}
-		}
 	}
-	WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
-		xdp_queue_number != efx->xdp_tx_queue_count);
-	WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
-		xdp_queue_number > efx->xdp_tx_queue_count);
 
-	/* If we have more CPUs than assigned XDP TX queues, assign the already
-	 * existing queues to the exceeding CPUs
-	 */
-	next_queue = 0;
-	while (xdp_queue_number < efx->xdp_tx_queue_count) {
-		tx_queue = efx->xdp_tx_queues[next_queue++];
-		rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
-		if (rc == 0)
-			xdp_queue_number++;
-	}
+	efx_set_xdp_channels(efx);
 
 	rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
 	if (rc)
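For context on why every slot must end up valid (none of this is in the diff; the function below is an assumption sketched from the shape of the sfc XDP transmit path, not a copy of it): the transmit side indexes efx->xdp_tx_queues by the current CPU, so a CPU whose slot was never filled could not send XDP frames at all.

```c
/* Hedged sketch of the consumer side; the name and exact shape are
 * assumptions. Only efx->xdp_tx_queues and efx->xdp_tx_queue_count
 * come from the patch above.
 */
static struct efx_tx_queue *efx_xdp_pick_tx_queue(struct efx_nic *efx)
{
	int cpu = raw_smp_processor_id();

	if (unlikely(cpu >= efx->xdp_tx_queue_count))
		return NULL;			/* no slot set up for this CPU */

	return efx->xdp_tx_queues[cpu];		/* may be shared across CPUs */
}
```

When the mode is not EFX_XDP_TX_QUEUES_DEDICATED, the wrap-around assignment means several CPUs can map to the same queue, so that transmit path needs serialisation that a truly per-CPU queue would not.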