Commit d445516

jrfastab authored and davem330 committed
net: xdp: support xdp generic on virtual devices
XDP generic allows users to test XDP programs and/or run them with degraded performance on devices that do not yet support XDP. For testing, I typically exercise eBPF programs using a set of veth devices. This allows testing topologies that would otherwise be difficult to set up, especially in the early stages of development.

This patch adds an XDP generic hook to the netif_rx_internal() function, which is called from dev_forward_skb(). With this addition, attaching XDP programs to veth devices works as expected! Also, I noticed multiple drivers using netif_rx(). These devices will benefit as well, and generic XDP will work for them too.

Signed-off-by: John Fastabend <[email protected]>
Tested-by: Andy Gospodarek <[email protected]>
Acked-by: Daniel Borkmann <[email protected]>
Acked-by: Jesper Dangaard Brouer <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
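For anyone reproducing the veth test described above, a minimal XDP program is enough to exercise the new hook. The sketch below is illustrative only and is not part of this commit; the file, function, and section names are placeholders.

/* xdp_pass_kern.c -- a minimal sketch of an eBPF program one might
 * attach to a veth device to exercise the generic XDP hook added by
 * this patch. Illustrative only; not part of this commit.
 */
#include <linux/bpf.h>

#ifndef __section
# define __section(NAME) __attribute__((section(NAME), used))
#endif

__section("xdp")
int xdp_test(struct xdp_md *ctx)
{
	/* Pass every frame up the stack; returning XDP_DROP or XDP_TX
	 * instead exercises the other actions handled by
	 * netif_receive_generic_xdp() in the diff below.
	 */
	return XDP_PASS;
}

char __license[] __section("license") = "GPL";

Built with something like clang -O2 -target bpf -c xdp_pass_kern.c -o xdp_pass_kern.o, the object can then be attached with a sufficiently recent iproute2, e.g. ip link set dev veth0 xdp obj xdp_pass_kern.o sec xdp (the device name is a placeholder).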
1 parent 90382dc commit d445516

File tree: 1 file changed (+113, -95 lines)


Diff for: net/core/dev.c

@@ -3865,13 +3865,122 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
 	return NET_RX_DROP;
 }
 
+static u32 netif_receive_generic_xdp(struct sk_buff *skb,
+				     struct bpf_prog *xdp_prog)
+{
+	struct xdp_buff xdp;
+	u32 act = XDP_DROP;
+	void *orig_data;
+	int hlen, off;
+	u32 mac_len;
+
+	/* Reinjected packets coming from act_mirred or similar should
+	 * not get XDP generic processing.
+	 */
+	if (skb_cloned(skb))
+		return XDP_PASS;
+
+	if (skb_linearize(skb))
+		goto do_drop;
+
+	/* The XDP program wants to see the packet starting at the MAC
+	 * header.
+	 */
+	mac_len = skb->data - skb_mac_header(skb);
+	hlen = skb_headlen(skb) + mac_len;
+	xdp.data = skb->data - mac_len;
+	xdp.data_end = xdp.data + hlen;
+	xdp.data_hard_start = skb->data - skb_headroom(skb);
+	orig_data = xdp.data;
+
+	act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+	off = xdp.data - orig_data;
+	if (off > 0)
+		__skb_pull(skb, off);
+	else if (off < 0)
+		__skb_push(skb, -off);
+
+	switch (act) {
+	case XDP_TX:
+		__skb_push(skb, mac_len);
+		/* fall through */
+	case XDP_PASS:
+		break;
+
+	default:
+		bpf_warn_invalid_xdp_action(act);
+		/* fall through */
+	case XDP_ABORTED:
+		trace_xdp_exception(skb->dev, xdp_prog, act);
+		/* fall through */
+	case XDP_DROP:
+	do_drop:
+		kfree_skb(skb);
+		break;
+	}
+
+	return act;
+}
+
+/* When doing generic XDP we have to bypass the qdisc layer and the
+ * network taps in order to match in-driver-XDP behavior.
+ */
+static void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
+{
+	struct net_device *dev = skb->dev;
+	struct netdev_queue *txq;
+	bool free_skb = true;
+	int cpu, rc;
+
+	txq = netdev_pick_tx(dev, skb, NULL);
+	cpu = smp_processor_id();
+	HARD_TX_LOCK(dev, txq, cpu);
+	if (!netif_xmit_stopped(txq)) {
+		rc = netdev_start_xmit(skb, dev, txq, 0);
+		if (dev_xmit_complete(rc))
+			free_skb = false;
+	}
+	HARD_TX_UNLOCK(dev, txq);
+	if (free_skb) {
+		trace_xdp_exception(dev, xdp_prog, XDP_TX);
+		kfree_skb(skb);
+	}
+}
+
+static struct static_key generic_xdp_needed __read_mostly;
+
+static int do_xdp_generic(struct sk_buff *skb)
+{
+	struct bpf_prog *xdp_prog = rcu_dereference(skb->dev->xdp_prog);
+
+	if (xdp_prog) {
+		u32 act = netif_receive_generic_xdp(skb, xdp_prog);
+
+		if (act != XDP_PASS) {
+			if (act == XDP_TX)
+				generic_xdp_tx(skb, xdp_prog);
+			return XDP_DROP;
+		}
+	}
+	return XDP_PASS;
+}
+
 static int netif_rx_internal(struct sk_buff *skb)
 {
 	int ret;
 
 	net_timestamp_check(netdev_tstamp_prequeue, skb);
 
 	trace_netif_rx(skb);
+
+	if (static_key_false(&generic_xdp_needed)) {
+		int ret = do_xdp_generic(skb);
+
+		if (ret != XDP_PASS)
+			return NET_RX_DROP;
+	}
+
 #ifdef CONFIG_RPS
 	if (static_key_false(&rps_needed)) {
 		struct rps_dev_flow voidflow, *rflow = &voidflow;

@@ -4338,8 +4447,6 @@ static int __netif_receive_skb(struct sk_buff *skb)
 	return ret;
 }
 
-static struct static_key generic_xdp_needed __read_mostly;
-
 static int generic_xdp_install(struct net_device *dev, struct netdev_xdp *xdp)
 {
 	struct bpf_prog *old = rtnl_dereference(dev->xdp_prog);

@@ -4373,89 +4480,6 @@ static int generic_xdp_install(struct net_device *dev, struct netdev_xdp *xdp)
 	return ret;
 }
 
-static u32 netif_receive_generic_xdp(struct sk_buff *skb,
-				     struct bpf_prog *xdp_prog)
-{
-	struct xdp_buff xdp;
-	u32 act = XDP_DROP;
-	void *orig_data;
-	int hlen, off;
-	u32 mac_len;
-
-	/* Reinjected packets coming from act_mirred or similar should
-	 * not get XDP generic processing.
-	 */
-	if (skb_cloned(skb))
-		return XDP_PASS;
-
-	if (skb_linearize(skb))
-		goto do_drop;
-
-	/* The XDP program wants to see the packet starting at the MAC
-	 * header.
-	 */
-	mac_len = skb->data - skb_mac_header(skb);
-	hlen = skb_headlen(skb) + mac_len;
-	xdp.data = skb->data - mac_len;
-	xdp.data_end = xdp.data + hlen;
-	xdp.data_hard_start = skb->data - skb_headroom(skb);
-	orig_data = xdp.data;
-
-	act = bpf_prog_run_xdp(xdp_prog, &xdp);
-
-	off = xdp.data - orig_data;
-	if (off > 0)
-		__skb_pull(skb, off);
-	else if (off < 0)
-		__skb_push(skb, -off);
-
-	switch (act) {
-	case XDP_TX:
-		__skb_push(skb, mac_len);
-		/* fall through */
-	case XDP_PASS:
-		break;
-
-	default:
-		bpf_warn_invalid_xdp_action(act);
-		/* fall through */
-	case XDP_ABORTED:
-		trace_xdp_exception(skb->dev, xdp_prog, act);
-		/* fall through */
-	case XDP_DROP:
-	do_drop:
-		kfree_skb(skb);
-		break;
-	}
-
-	return act;
-}
-
-/* When doing generic XDP we have to bypass the qdisc layer and the
- * network taps in order to match in-driver-XDP behavior.
- */
-static void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
-{
-	struct net_device *dev = skb->dev;
-	struct netdev_queue *txq;
-	bool free_skb = true;
-	int cpu, rc;
-
-	txq = netdev_pick_tx(dev, skb, NULL);
-	cpu = smp_processor_id();
-	HARD_TX_LOCK(dev, txq, cpu);
-	if (!netif_xmit_stopped(txq)) {
-		rc = netdev_start_xmit(skb, dev, txq, 0);
-		if (dev_xmit_complete(rc))
-			free_skb = false;
-	}
-	HARD_TX_UNLOCK(dev, txq);
-	if (free_skb) {
-		trace_xdp_exception(dev, xdp_prog, XDP_TX);
-		kfree_skb(skb);
-	}
-}
-
 static int netif_receive_skb_internal(struct sk_buff *skb)
 {
 	int ret;

@@ -4468,17 +4492,11 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
 	rcu_read_lock();
 
 	if (static_key_false(&generic_xdp_needed)) {
-		struct bpf_prog *xdp_prog = rcu_dereference(skb->dev->xdp_prog);
-
-		if (xdp_prog) {
-			u32 act = netif_receive_generic_xdp(skb, xdp_prog);
+		int ret = do_xdp_generic(skb);
 
-			if (act != XDP_PASS) {
-				rcu_read_unlock();
-				if (act == XDP_TX)
-					generic_xdp_tx(skb, xdp_prog);
-				return NET_RX_DROP;
-			}
+		if (ret != XDP_PASS) {
+			rcu_read_unlock();
+			return NET_RX_DROP;
 		}
 	}
 
