@@ -235,19 +235,18 @@ static bool rswitch_is_queue_rxed(struct rswitch_gwca_queue *gq)
 	return false;
 }
 
-static int rswitch_gwca_queue_alloc_skb(struct rswitch_gwca_queue *gq,
-					unsigned int start_index,
-					unsigned int num)
+static int rswitch_gwca_queue_alloc_rx_buf(struct rswitch_gwca_queue *gq,
+					   unsigned int start_index,
+					   unsigned int num)
 {
 	unsigned int i, index;
 
 	for (i = 0; i < num; i++) {
 		index = (i + start_index) % gq->ring_size;
-		if (gq->skbs[index])
+		if (gq->rx_bufs[index])
 			continue;
-		gq->skbs[index] = netdev_alloc_skb_ip_align(gq->ndev,
-							    PKT_BUF_SZ + RSWITCH_ALIGN - 1);
-		if (!gq->skbs[index])
+		gq->rx_bufs[index] = netdev_alloc_frag(RSWITCH_BUF_SIZE);
+		if (!gq->rx_bufs[index])
 			goto err;
 	}
 
@@ -256,8 +255,8 @@ static int rswitch_gwca_queue_alloc_skb(struct rswitch_gwca_queue *gq,
 err:
 	for (; i-- > 0; ) {
 		index = (i + start_index) % gq->ring_size;
-		dev_kfree_skb(gq->skbs[index]);
-		gq->skbs[index] = NULL;
+		skb_free_frag(gq->rx_bufs[index]);
+		gq->rx_bufs[index] = NULL;
 	}
 
 	return -ENOMEM;
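
Note: the RSWITCH_* size macros used above are defined outside this excerpt. A
minimal sketch of plausible definitions, assuming the usual build_skb() buffer
layout (headroom | packet area | skb_shared_info tailroom); the concrete values
and composition below are assumptions, not taken from this diff:

/* Hypothetical sketch of the sizing macros (values assumed, not from the patch). */
#define RSWITCH_HEADROOM	(NET_SKB_PAD + NET_IP_ALIGN)
#define RSWITCH_DESC_BUF_SIZE	2048	/* packet area advertised via info_ds */
#define RSWITCH_TAILROOM	SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
#define RSWITCH_ALIGN		128
/* Whole frag handed to netdev_alloc_frag()/build_skb(). */
#define RSWITCH_BUF_SIZE	(RSWITCH_HEADROOM + RSWITCH_DESC_BUF_SIZE + \
				 RSWITCH_TAILROOM + RSWITCH_ALIGN)
/* Device-visible window: everything past the headroom. */
#define RSWITCH_MAP_BUF_SIZE	(RSWITCH_BUF_SIZE - RSWITCH_HEADROOM)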
@@ -275,16 +274,17 @@ static void rswitch_gwca_queue_free(struct net_device *ndev,
 		gq->rx_ring = NULL;
 
 		for (i = 0; i < gq->ring_size; i++)
-			dev_kfree_skb(gq->skbs[i]);
+			skb_free_frag(gq->rx_bufs[i]);
+		kfree(gq->rx_bufs);
+		gq->rx_bufs = NULL;
 	} else {
 		dma_free_coherent(ndev->dev.parent,
 				  sizeof(struct rswitch_ext_desc) *
 				  (gq->ring_size + 1), gq->tx_ring, gq->ring_dma);
 		gq->tx_ring = NULL;
+		kfree(gq->skbs);
+		gq->skbs = NULL;
 	}
-
-	kfree(gq->skbs);
-	gq->skbs = NULL;
 }
 
 static void rswitch_gwca_ts_queue_free(struct rswitch_private *priv)
@@ -308,17 +308,20 @@ static int rswitch_gwca_queue_alloc(struct net_device *ndev,
 	gq->ring_size = ring_size;
 	gq->ndev = ndev;
 
-	gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL);
-	if (!gq->skbs)
-		return -ENOMEM;
-
 	if (!dir_tx) {
-		rswitch_gwca_queue_alloc_skb(gq, 0, gq->ring_size);
+		gq->rx_bufs = kcalloc(gq->ring_size, sizeof(*gq->rx_bufs), GFP_KERNEL);
+		if (!gq->rx_bufs)
+			return -ENOMEM;
+		if (rswitch_gwca_queue_alloc_rx_buf(gq, 0, gq->ring_size) < 0)
+			goto out;
 
 		gq->rx_ring = dma_alloc_coherent(ndev->dev.parent,
 						 sizeof(struct rswitch_ext_ts_desc) *
 						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
 	} else {
+		gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL);
+		if (!gq->skbs)
+			return -ENOMEM;
 		gq->tx_ring = dma_alloc_coherent(ndev->dev.parent,
 						 sizeof(struct rswitch_ext_desc) *
 						 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
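
The allocation split above implies the queue struct keeps per-direction arrays:
skbs only for TX completion bookkeeping, rx_bufs only for raw RX frags. A
sketch of the assumed fields (names taken from this diff, surrounding members
elided):

struct rswitch_gwca_queue {
	/* ... ring pointers, ring_size, cur, dirty, dir_tx, ndev, ... */
	struct sk_buff **skbs;	/* TX only: skbs in flight until completion */
	void **rx_bufs;		/* RX only: frags, wrapped by build_skb() later */
};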
@@ -367,12 +370,13 @@ static int rswitch_gwca_queue_format(struct net_device *ndev,
 	for (i = 0, desc = gq->tx_ring; i < gq->ring_size; i++, desc++) {
 		if (!gq->dir_tx) {
 			dma_addr = dma_map_single(ndev->dev.parent,
-						  gq->skbs[i]->data, PKT_BUF_SZ,
+						  gq->rx_bufs[i] + RSWITCH_HEADROOM,
+						  RSWITCH_MAP_BUF_SIZE,
 						  DMA_FROM_DEVICE);
 			if (dma_mapping_error(ndev->dev.parent, dma_addr))
 				goto err;
 
-			desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ);
+			desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE);
 			rswitch_desc_set_dptr(&desc->desc, dma_addr);
 			desc->desc.die_dt = DT_FEMPTY | DIE;
 		} else {
@@ -395,8 +399,8 @@ static int rswitch_gwca_queue_format(struct net_device *ndev,
 	if (!gq->dir_tx) {
 		for (desc = gq->tx_ring; i-- > 0; desc++) {
 			dma_addr = rswitch_desc_get_dptr(&desc->desc);
-			dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
-					 DMA_FROM_DEVICE);
+			dma_unmap_single(ndev->dev.parent, dma_addr,
+					 RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE);
 		}
 	}
 
@@ -433,12 +437,13 @@ static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
 		desc = &gq->rx_ring[index];
 		if (!gq->dir_tx) {
 			dma_addr = dma_map_single(ndev->dev.parent,
-						  gq->skbs[index]->data, PKT_BUF_SZ,
+						  gq->rx_bufs[index] + RSWITCH_HEADROOM,
+						  RSWITCH_MAP_BUF_SIZE,
 						  DMA_FROM_DEVICE);
 			if (dma_mapping_error(ndev->dev.parent, dma_addr))
 				goto err;
 
-			desc->desc.info_ds = cpu_to_le16(PKT_BUF_SZ);
+			desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE);
 			rswitch_desc_set_dptr(&desc->desc, dma_addr);
 			dma_wmb();
 			desc->desc.die_dt = DT_FEMPTY | DIE;
@@ -456,8 +461,8 @@ static int rswitch_gwca_queue_ext_ts_fill(struct net_device *ndev,
 		index = (i + start_index) % gq->ring_size;
 		desc = &gq->rx_ring[index];
 		dma_addr = rswitch_desc_get_dptr(&desc->desc);
-		dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
-				 DMA_FROM_DEVICE);
+		dma_unmap_single(ndev->dev.parent, dma_addr,
+				 RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE);
 	}
 }
 
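
Two distinct sizes are in play in both fill paths: the streaming mapping covers
RSWITCH_MAP_BUF_SIZE starting past the headroom, while the descriptor advertises
RSWITCH_DESC_BUF_SIZE as the maximum the hardware may fill. A hypothetical
helper (name and shape illustrative only) making the pairing explicit, using
the descriptor identifiers from this diff:

/* Hypothetical helper: map one RX frag and publish it to an empty descriptor. */
static int rswitch_rx_desc_arm(struct device *dev,
			       struct rswitch_ext_ts_desc *desc, void *buf)
{
	dma_addr_t dma_addr;

	/* The device must not touch the headroom reserved for the stack. */
	dma_addr = dma_map_single(dev, buf + RSWITCH_HEADROOM,
				  RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	desc->desc.info_ds = cpu_to_le16(RSWITCH_DESC_BUF_SIZE);
	rswitch_desc_set_dptr(&desc->desc, dma_addr);
	dma_wmb();	/* publish size/pointer before handing the slot to HW */
	desc->desc.die_dt = DT_FEMPTY | DIE;
	return 0;
}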
@@ -724,10 +729,15 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
 	while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) {
 		dma_rmb();
 		pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS;
-		skb = gq->skbs[gq->cur];
-		gq->skbs[gq->cur] = NULL;
 		dma_addr = rswitch_desc_get_dptr(&desc->desc);
-		dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ, DMA_FROM_DEVICE);
+		dma_unmap_single(ndev->dev.parent, dma_addr,
+				 RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE);
+		skb = build_skb(gq->rx_bufs[gq->cur], RSWITCH_BUF_SIZE);
+		if (!skb)
+			goto out;
+		skb_reserve(skb, RSWITCH_HEADROOM);
+		skb_put(skb, pkt_len);
+
 		get_ts = rdev->priv->ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
 		if (get_ts) {
 			struct skb_shared_hwtstamps *shhwtstamps;
@@ -739,12 +749,13 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
 			ts.tv_nsec = __le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
 			shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
 		}
-		skb_put(skb, pkt_len);
 		skb->protocol = eth_type_trans(skb, ndev);
 		napi_gro_receive(&rdev->napi, skb);
 		rdev->ndev->stats.rx_packets++;
 		rdev->ndev->stats.rx_bytes += pkt_len;
 
+out:
+		gq->rx_bufs[gq->cur] = NULL;
 		gq->cur = rswitch_next_queue_index(gq, true, 1);
 		desc = &gq->rx_ring[gq->cur];
 
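
Taken together, rswitch_rx() now follows the standard build_skb() recipe:
unmap the completed frag, wrap it in an skb without copying, reserve the
headroom, and hand the result to GRO. A condensed, self-contained illustration
(error handling simplified relative to the driver, which instead branches to
the out: label above):

/* Hypothetical condensed view of the converted RX completion path. */
static void rx_complete_one(struct net_device *ndev, struct napi_struct *napi,
			    void *buf, dma_addr_t dma_addr, unsigned int pkt_len)
{
	struct sk_buff *skb;

	dma_unmap_single(ndev->dev.parent, dma_addr,
			 RSWITCH_MAP_BUF_SIZE, DMA_FROM_DEVICE);
	skb = build_skb(buf, RSWITCH_BUF_SIZE);	/* wrap the frag, no memcpy */
	if (!skb) {
		skb_free_frag(buf);	/* simplified error handling */
		return;
	}
	skb_reserve(skb, RSWITCH_HEADROOM);	/* skb->data -> start of packet */
	skb_put(skb, pkt_len);			/* length taken from info_ds */
	skb->protocol = eth_type_trans(skb, ndev);
	napi_gro_receive(napi, skb);
}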
@@ -753,7 +764,7 @@ static bool rswitch_rx(struct net_device *ndev, int *quota)
 	}
 
 	num = rswitch_get_num_cur_queues(gq);
-	ret = rswitch_gwca_queue_alloc_skb(gq, gq->dirty, num);
+	ret = rswitch_gwca_queue_alloc_rx_buf(gq, gq->dirty, num);
 	if (ret < 0)
 		goto err;
 	ret = rswitch_gwca_queue_ext_ts_fill(ndev, gq, gq->dirty, num);