@@ -632,11 +632,16 @@ static void gem_rx_refill(struct macb *bp)
 				"Unable to allocate sk_buff\n");
 			break;
 		}
-		bp->rx_skbuff[entry] = skb;
 
 		/* now fill corresponding descriptor entry */
 		paddr = dma_map_single(&bp->pdev->dev, skb->data,
 				       bp->rx_buffer_size, DMA_FROM_DEVICE);
+		if (dma_mapping_error(&bp->pdev->dev, paddr)) {
+			dev_kfree_skb(skb);
+			break;
+		}
+
+		bp->rx_skbuff[entry] = skb;
 
 		if (entry == RX_RING_SIZE - 1)
 			paddr |= MACB_BIT(RX_WRAP);
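
Note: the sketch below is not part of the patch; it is a minimal, self-contained restatement of the RX refill pattern above, using hypothetical names and assuming a driver that keeps one sk_buff pointer per ring slot. The point is the ordering: map the buffer, check dma_mapping_error(), and only publish the skb into the ring once the mapping is known to be good.

/*
 * Hypothetical helper illustrating the refill pattern; not the driver's code.
 */
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int rx_refill_one(struct device *dma_dev, struct net_device *ndev,
			 struct sk_buff **ring_slot, unsigned int buf_size,
			 dma_addr_t *paddr_out)
{
	struct sk_buff *skb;
	dma_addr_t paddr;

	skb = netdev_alloc_skb(ndev, buf_size);
	if (!skb)
		return -ENOMEM;

	paddr = dma_map_single(dma_dev, skb->data, buf_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dma_dev, paddr)) {
		/* mapping failed: drop the skb, leave the ring slot untouched */
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	/* publish only after the mapping has been verified */
	*ring_slot = skb;
	*paddr_out = paddr;
	return 0;
}
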
@@ -1036,11 +1041,15 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	entry = macb_tx_ring_wrap(bp->tx_head);
-	bp->tx_head++;
 	netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry);
 	mapping = dma_map_single(&bp->pdev->dev, skb->data,
 				 len, DMA_TO_DEVICE);
+	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
+		kfree_skb(skb);
+		goto unlock;
+	}
 
+	bp->tx_head++;
 	tx_skb = &bp->tx_skb[entry];
 	tx_skb->skb = skb;
 	tx_skb->mapping = mapping;
@@ -1066,6 +1075,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1)
 		netif_stop_queue(dev);
 
+unlock:
 	spin_unlock_irqrestore(&bp->lock, flags);
 
 	return NETDEV_TX_OK;
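
For the transmit side, the two hunks above work together: the mapping is checked before tx_head is advanced, and a failed mapping bails out through a single unlock label so no half-initialised ring entry is left behind. The sketch below is a hedged, stand-alone restatement of that ordering with hypothetical names; it is not the driver's code.

/*
 * Hypothetical transmit-side sketch of the same pattern.
 */
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct tx_ring {
	spinlock_t	lock;
	unsigned int	head;
	struct device	*dma_dev;
};

static netdev_tx_t tx_map_and_queue(struct tx_ring *ring, struct sk_buff *skb)
{
	unsigned long flags;
	dma_addr_t mapping;

	spin_lock_irqsave(&ring->lock, flags);

	mapping = dma_map_single(ring->dma_dev, skb->data, skb->len,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(ring->dma_dev, mapping)) {
		/* drop the frame; the producer index was never moved */
		kfree_skb(skb);
		goto unlock;
	}

	ring->head++;	/* commit the entry only after a good mapping */
	/* ... fill the descriptor and kick the hardware here ... */

unlock:
	spin_unlock_irqrestore(&ring->lock, flags);
	return NETDEV_TX_OK;
}
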