Skip to content

Commit 9181563

Browse files
jasowang authored and davem330 committed
virtio-net: rx busy polling support
Add basic support for rx busy polling. Instead of introducing new states and a spinlock to synchronize between NAPI and the polling method, this patch just reuses the NAPI state to avoid extra overhead on the fast path and simplifies the code. The test was done between a kvm guest and an external host. The two hosts were connected through 40gb mlx4 cards. With both busy_poll and busy_read set to 50 in the guest, 1 byte netperf tcp_rr shows a 127% improvement: the transaction rate was increased from 8353.33 to 18966.87. Cc: Rusty Russell <[email protected]> Cc: Michael S. Tsirkin <[email protected]> Cc: Vlad Yasevich <[email protected]> Cc: Eric Dumazet <[email protected]> Signed-off-by: Jason Wang <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent 2ffa759 commit 9181563

File tree

1 file changed

+47
-1
lines changed

1 file changed

+47
-1
lines changed

drivers/net/virtio_net.c

Lines changed: 47 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,7 @@
2727
#include <linux/slab.h>
2828
#include <linux/cpu.h>
2929
#include <linux/average.h>
30+
#include <net/busy_poll.h>
3031

3132
static int napi_weight = NAPI_POLL_WEIGHT;
3233
module_param(napi_weight, int, 0444);
@@ -521,6 +522,8 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
521522
skb_shinfo(skb)->gso_segs = 0;
522523
}
523524

525+
skb_mark_napi_id(skb, &rq->napi);
526+
524527
netif_receive_skb(skb);
525528
return;
526529

@@ -769,6 +772,43 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
769772
return received;
770773
}
771774

775+
#ifdef CONFIG_NET_RX_BUSY_POLL
/*
 * Busy-poll one rx queue from the socket layer (ndo_busy_poll).
 *
 * Must be called with local_bh_disable()d.  Instead of a dedicated
 * lock, this reuses the NAPI_STATE_SCHED bit to serialize against the
 * regular NAPI handler: napi_schedule_prep() acts as a trylock, and
 * clearing the bit releases it.
 *
 * Returns the number of packets received, or LL_FLUSH_FAILED /
 * LL_FLUSH_BUSY when polling cannot proceed.
 */
static int virtnet_busy_poll(struct napi_struct *napi)
{
	struct receive_queue *rq =
		container_of(napi, struct receive_queue, napi);
	struct virtnet_info *vi = rq->vq->vdev->priv;
	int r, received = 0, budget = 4;

	/* No point in polling a queue whose link is down. */
	if (!(vi->status & VIRTIO_NET_S_LINK_UP))
		return LL_FLUSH_FAILED;

	/* NAPI is already running on this queue; let it do the work. */
	if (!napi_schedule_prep(napi))
		return LL_FLUSH_BUSY;

	/* Suppress device interrupts while we poll synchronously. */
	virtqueue_disable_cb(rq->vq);

again:
	received += virtnet_receive(rq, budget);

	/* Re-arm the callback, then drop our "lock" on the queue. */
	r = virtqueue_enable_cb_prepare(rq->vq);
	clear_bit(NAPI_STATE_SCHED, &napi->state);
	/*
	 * More buffers arrived between receive and re-arm: if we can
	 * re-acquire the queue, either keep polling with the leftover
	 * budget or hand the remainder off to NAPI proper.
	 */
	if (unlikely(virtqueue_poll(rq->vq, r)) &&
	    napi_schedule_prep(napi)) {
		virtqueue_disable_cb(rq->vq);
		if (received < budget) {
			budget -= received;
			goto again;
		} else {
			__napi_schedule(napi);
		}
	}

	return received;
}
#endif	/* CONFIG_NET_RX_BUSY_POLL */
811+
772812
static int virtnet_open(struct net_device *dev)
773813
{
774814
struct virtnet_info *vi = netdev_priv(dev);
@@ -1356,6 +1396,9 @@ static const struct net_device_ops virtnet_netdev = {
13561396
#ifdef CONFIG_NET_POLL_CONTROLLER
13571397
.ndo_poll_controller = virtnet_netpoll,
13581398
#endif
1399+
#ifdef CONFIG_NET_RX_BUSY_POLL
1400+
.ndo_busy_poll = virtnet_busy_poll,
1401+
#endif
13591402
};
13601403

13611404
static void virtnet_config_changed_work(struct work_struct *work)
@@ -1561,6 +1604,7 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
15611604
vi->rq[i].pages = NULL;
15621605
netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
15631606
napi_weight);
1607+
napi_hash_add(&vi->rq[i].napi);
15641608

15651609
sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
15661610
ewma_init(&vi->rq[i].mrg_avg_pkt_len, 1, RECEIVE_AVG_WEIGHT);
@@ -1862,11 +1906,13 @@ static int virtnet_freeze(struct virtio_device *vdev)
18621906
netif_device_detach(vi->dev);
18631907
cancel_delayed_work_sync(&vi->refill);
18641908

1865-
if (netif_running(vi->dev))
1909+
if (netif_running(vi->dev)) {
18661910
for (i = 0; i < vi->max_queue_pairs; i++) {
18671911
napi_disable(&vi->rq[i].napi);
1912+
napi_hash_del(&vi->rq[i].napi);
18681913
netif_napi_del(&vi->rq[i].napi);
18691914
}
1915+
}
18701916

18711917
remove_vq_common(vi);
18721918

0 commit comments

Comments
 (0)