@@ -7730,7 +7730,8 @@ static void ixgbe_service_task(struct work_struct *work)
 
 static int ixgbe_tso(struct ixgbe_ring *tx_ring,
		     struct ixgbe_tx_buffer *first,
-		     u8 *hdr_len)
+		     u8 *hdr_len,
+		     struct ixgbe_ipsec_tx_data *itd)
 {
	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
	struct sk_buff *skb = first->skb;
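Reviewer note: the new itd argument carries the IPsec TX metadata filled in by ixgbe_ipsec_tx() earlier in the xmit path. Inferred purely from how the fields are consumed later in this patch (sa_idx, flags, trailer_len), the struct is assumed to look roughly like the sketch below; the real definition lives in ixgbe_ipsec.h and the exact field widths may differ.

/* Assumed shape of struct ixgbe_ipsec_tx_data, inferred from usage in
 * this patch; not copied from the header.
 */
struct ixgbe_ipsec_tx_data {
	u32 flags;        /* ADVTXD TUCMD bits, ORed into type_tucmd */
	u16 trailer_len;  /* ESP trailer length, also ORed into type_tucmd */
	u16 sa_idx;       /* hardware SA table index, becomes fceof_saidx */
};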
@@ -7744,6 +7745,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
		unsigned char *hdr;
	} l4;
	u32 paylen, l4_offset;
+	u32 fceof_saidx = 0;
	int err;
 
	if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -7769,13 +7771,15 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
	if (ip.v4->version == 4) {
		unsigned char *csum_start = skb_checksum_start(skb);
		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);
+		int len = csum_start - trans_start;
 
		/* IP header will have to cancel out any data that
-		 * is not a part of the outer IP header
+		 * is not a part of the outer IP header, so set to
+		 * a reverse csum if needed, else init check to 0.
		 */
-		ip.v4->check = csum_fold(csum_partial(trans_start,
-						      csum_start - trans_start,
-						      0));
+		ip.v4->check = (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) ?
+					csum_fold(csum_partial(trans_start,
+							       len, 0)) : 0;
		type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
 
		ip.v4->tot_len = 0;
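This hunk is the subtle part. For plain TSO the outer IPv4 check is simply zeroed, since hardware recomputes it per segment; for GSO_PARTIAL the bytes between the transport header and the checksum start are already summed into the packet, so the driver stores their folded one's-complement as a "reverse csum" that cancels them out. Below is a minimal, self-contained userspace sketch of that cancellation, with hand-rolled stand-ins for the kernel's csum_partial()/csum_fold() (not driver code, just an illustration of the arithmetic):

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Toy 16-bit one's-complement accumulator, analogous to csum_partial(). */
static uint32_t csum_add(uint32_t sum, const uint8_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += ((uint32_t)buf[i] << 8) | buf[i + 1];
	if (len & 1)
		sum += (uint32_t)buf[len - 1] << 8;
	return sum;
}

/* Fold carries into 16 bits and complement, analogous to csum_fold(). */
static uint16_t csum_fold16(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint8_t extra[4] = { 0x12, 0x34, 0x56, 0x78 };
	/* Store the folded complement of the extra bytes, as the driver
	 * does with ip.v4->check in the GSO_PARTIAL case above. */
	uint16_t check = csum_fold16(csum_add(0, extra, sizeof(extra)));
	/* Summing check together with the extra bytes folds to 0xffff,
	 * so csum_fold16() yields 0: the extra bytes are cancelled. */
	uint32_t total = csum_add(check, extra, sizeof(extra));

	printf("check = 0x%04x, folded total = 0x%04x\n",
	       check, csum_fold16(total));
	return 0;
}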
@@ -7806,12 +7810,15 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
	mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
 
+	fceof_saidx |= itd->sa_idx;
+	type_tucmd |= itd->flags | itd->trailer_len;
+
	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = l4.hdr - ip.hdr;
	vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
 
-	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
+	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fceof_saidx, type_tucmd,
			  mss_l4len_idx);
 
	return 1;
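With this change the third word of the advanced TX context descriptor, previously hardwired to 0 here and historically used for the FCoE SOF/EOF fields (hence the fceof_saidx name), now carries the IPsec SA index, while the IPsec flags and trailer length ride in type_tucmd. For orientation only, the descriptor layout as I understand it from ixgbe_type.h:

/* Advanced TX context descriptor, reproduced from memory for review
 * context; consult ixgbe_type.h for the authoritative definition.
 */
struct ixgbe_adv_tx_context_desc {
	__le32 vlan_macip_lens;   /* header lengths + VLAN tag */
	__le32 seqnum_seed;       /* FCoE SOF/EOF, or now the IPsec SA index */
	__le32 type_tucmd_mlhl;   /* descriptor type + TUCMD flags (incl. IPsec) */
	__le32 mss_l4len_idx;     /* MSS and L4 header length for TSO */
};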
@@ -8502,7 +8509,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
	if (skb->sp && !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
		goto out_drop;
 #endif
-	tso = ixgbe_tso(tx_ring, first, &hdr_len);
+	tso = ixgbe_tso(tx_ring, first, &hdr_len, &ipsec_tx);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
@@ -9911,9 +9918,15 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
 
	/* We can only support IPV4 TSO in tunnels if we can mangle the
	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
+	 * IPsec offload sets skb->encapsulation but still can handle
+	 * the TSO, so it's the exception.
	 */
-	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
-		features &= ~NETIF_F_TSO;
+	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) {
+#ifdef CONFIG_XFRM
+		if (!skb->sp)
+#endif
+			features &= ~NETIF_F_TSO;
+	}
 
	return features;
 }
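Net effect: skb->sp is non-NULL when an xfrm security path is attached, i.e. the packet is going through IPsec offload, and such packets are exempted from the MANGLEID-based TSO stripping. A condensed, hypothetical restatement of the resulting policy (the function name and parameters are mine, not the driver's):

/* Hypothetical restatement of the TSO policy above, assuming
 * CONFIG_XFRM=y; tso_keep_policy() is not a real driver function.
 */
static netdev_features_t tso_keep_policy(bool encapsulated, bool can_mangleid,
					 bool has_ipsec_sp,
					 netdev_features_t features)
{
	/* Strip TSO only for encapsulated, non-IPsec packets when the
	 * inner IP ID cannot be mangled.
	 */
	if (encapsulated && !can_mangleid && !has_ipsec_sp)
		features &= ~NETIF_F_TSO;
	return features;
}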