@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/cpu.h>
 #include <linux/average.h>
+#include <net/busy_poll.h>
 
 static int napi_weight = NAPI_POLL_WEIGHT;
 module_param(napi_weight, int, 0444);
@@ -521,6 +522,8 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
 		skb_shinfo(skb)->gso_segs = 0;
 	}
 
+	skb_mark_napi_id(skb, &rq->napi);
+
 	netif_receive_skb(skb);
 	return;
 
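For context on the skb_mark_napi_id() call above: it stamps the receive queue's NAPI id into the skb, and the socket layer later copies that id into the receiving socket, which is how busy polling finds its way back to this queue. A minimal sketch of the two helpers involved, paraphrased from <net/busy_poll.h> of this kernel generation (illustrative only, not part of this patch; the _sketch suffixes are mine):

/* Sketch: how the napi_id recorded above reaches a socket.  Paraphrased
 * from <net/busy_poll.h>; names with a _sketch suffix are illustrative. */
static inline void skb_mark_napi_id_sketch(struct sk_buff *skb,
					   struct napi_struct *napi)
{
	skb->napi_id = napi->napi_id;	/* id assigned by napi_hash_add() */
}

static inline void sk_mark_napi_id_sketch(struct sock *sk,
					  struct sk_buff *skb)
{
	sk->sk_napi_id = skb->napi_id;	/* remembered for the busy-poll loop */
}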
@@ -769,6 +772,43 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 	return received;
 }
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+/* must be called with local_bh_disable()d */
+static int virtnet_busy_poll(struct napi_struct *napi)
+{
+	struct receive_queue *rq =
+		container_of(napi, struct receive_queue, napi);
+	struct virtnet_info *vi = rq->vq->vdev->priv;
+	int r, received = 0, budget = 4;
+
+	if (!(vi->status & VIRTIO_NET_S_LINK_UP))
+		return LL_FLUSH_FAILED;
+
+	if (!napi_schedule_prep(napi))
+		return LL_FLUSH_BUSY;
+
+	virtqueue_disable_cb(rq->vq);
+
+again:
+	received += virtnet_receive(rq, budget);
+
+	r = virtqueue_enable_cb_prepare(rq->vq);
+	clear_bit(NAPI_STATE_SCHED, &napi->state);
+	if (unlikely(virtqueue_poll(rq->vq, r)) &&
+	    napi_schedule_prep(napi)) {
+		virtqueue_disable_cb(rq->vq);
+		if (received < budget) {
+			budget -= received;
+			goto again;
+		} else {
+			__napi_schedule(napi);
+		}
+	}
+
+	return received;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
 static int virtnet_open(struct net_device *dev)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
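The new virtnet_busy_poll() above is not called from interrupt context: the socket layer invokes it through ndo_busy_poll (wired up in the net_device_ops hunk below) while a process spins in poll()/recv(). A simplified sketch of that caller, loosely following the sk_busy_loop() helper from <net/busy_poll.h> of this era (a paraphrase, not the verbatim implementation; the _sketch suffix is mine), shows where the LL_FLUSH_FAILED/LL_FLUSH_BUSY return codes go and why the driver can rely on bottom halves being disabled:

/* Sketch of the generic busy-poll caller (paraphrase of sk_busy_loop()). */
static inline bool sk_busy_loop_sketch(struct sock *sk, int nonblock)
{
	unsigned long end_time = busy_loop_end_time();
	const struct net_device_ops *ops;
	struct napi_struct *napi;
	int rc = 0;

	rcu_read_lock();

	/* sk->sk_napi_id was set from skb->napi_id on an earlier receive */
	napi = napi_by_id(sk->sk_napi_id);
	if (!napi)
		goto out;

	ops = napi->dev->netdev_ops;
	if (!ops->ndo_busy_poll)
		goto out;

	do {
		local_bh_disable();	/* hence "must be called with local_bh_disable()d" */
		rc = ops->ndo_busy_poll(napi);	/* -> virtnet_busy_poll() */
		local_bh_enable();

		if (rc == LL_FLUSH_FAILED)
			break;		/* e.g. link down: stop spinning */

		cpu_relax();
	} while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
		 !need_resched() && !busy_loop_timeout(end_time));
out:
	rcu_read_unlock();
	return rc > 0;
}

Inside virtnet_busy_poll() the small budget (4) keeps each spin iteration short, and the virtqueue_enable_cb_prepare()/virtqueue_poll() pair re-checks for packets that raced with re-enabling the ring's callback, either looping again while budget remains or handing the leftover work back to ordinary NAPI via __napi_schedule().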
@@ -1356,6 +1396,9 @@ static const struct net_device_ops virtnet_netdev = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = virtnet_netpoll,
 #endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	.ndo_busy_poll = virtnet_busy_poll,
+#endif
 };
 
 static void virtnet_config_changed_work(struct work_struct *work)
@@ -1561,6 +1604,7 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
 		vi->rq[i].pages = NULL;
 		netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
 			       napi_weight);
+		napi_hash_add(&vi->rq[i].napi);
 
 		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
 		ewma_init(&vi->rq[i].mrg_avg_pkt_len, 1, RECEIVE_AVG_WEIGHT);
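The napi_hash_add() call above is what gives each receive queue's NAPI context the unique napi_id used by the busy-poll path: it inserts the context into a global RCU hash so that napi_by_id() can resolve sk->sk_napi_id back to this queue. The add and del calls must stay paired, which the virtnet_freeze() hunk below takes care of. A minimal sketch of the lifetime using this driver's names (the wrapper function is illustrative; the ordering is taken from the patch itself):

/* Sketch: NAPI hash lifetime for one receive queue. */
static void virtnet_rq_napi_lifetime_sketch(struct virtnet_info *vi, int i)
{
	/* setup, as in virtnet_alloc_queues() above */
	netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll, napi_weight);
	napi_hash_add(&vi->rq[i].napi);		/* assigns a unique napi_id */

	/* teardown, as in virtnet_freeze() below: unhash before freeing
	 * so napi_by_id() cannot return a stale pointer */
	napi_disable(&vi->rq[i].napi);
	napi_hash_del(&vi->rq[i].napi);
	netif_napi_del(&vi->rq[i].napi);
}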
@@ -1862,11 +1906,13 @@ static int virtnet_freeze(struct virtio_device *vdev)
 	netif_device_detach(vi->dev);
 	cancel_delayed_work_sync(&vi->refill);
 
-	if (netif_running(vi->dev))
+	if (netif_running(vi->dev)) {
 		for (i = 0; i < vi->max_queue_pairs; i++) {
 			napi_disable(&vi->rq[i].napi);
+			napi_hash_del(&vi->rq[i].napi);
 			netif_napi_del(&vi->rq[i].napi);
 		}
+	}
 
 	remove_vq_common(vi);
 