
Commit c1ece6b

netoptimizer authored and Alexei Starovoitov committed
bpf/xdp: devmap can avoid calling ndo_xdp_flush
The XDP_REDIRECT map devmap can avoid using ndo_xdp_flush by instead instructing ndo_xdp_xmit to flush via the XDP_XMIT_FLUSH flag in the appropriate places.

Notice that after this patch it is possible to remove ndo_xdp_flush completely, as this is the last user of ndo_xdp_flush. That cleanup is left for later patches, to keep driver changes separate.

Signed-off-by: Jesper Dangaard Brouer <[email protected]>
Acked-by: Song Liu <[email protected]>
Signed-off-by: Alexei Starovoitov <[email protected]>
1 parent 1e67575 commit c1ece6b
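
For context, a rough sketch of the driver side this change enables: an ndo_xdp_xmit implementation can kick its TX ring only when the caller passes XDP_XMIT_FLUSH, which is what makes a separate ndo_xdp_flush callback redundant. The mydrv_* names and helpers below are hypothetical placeholders, not taken from this patch or any in-tree driver.

/* Hypothetical driver-side sketch (not part of this patch): honor the
 * XDP_XMIT_FLUSH flag inside ndo_xdp_xmit instead of relying on a
 * separate ndo_xdp_flush callback. mydrv_priv, mydrv_queue_frame() and
 * mydrv_ring_doorbell() are made-up placeholders.
 */
static int mydrv_xdp_xmit(struct net_device *dev, int n,
			  struct xdp_frame **frames, u32 flags)
{
	struct mydrv_priv *priv = netdev_priv(dev);
	int i, sent = 0;

	/* Reject flags this sketch does not understand */
	if (unlikely(flags & ~XDP_XMIT_FLUSH))
		return -EINVAL;

	for (i = 0; i < n; i++) {
		if (mydrv_queue_frame(priv, frames[i]))	/* enqueue on TX ring */
			break;
		sent++;
	}

	/* Kick the hardware once per bulk, only when the caller asks for it */
	if (flags & XDP_XMIT_FLUSH)
		mydrv_ring_doorbell(priv);

	return sent;	/* frames accepted; caller accounts for the rest */
}

With this pattern, bq_xmit_all() in the diff below can pass 0 for intermediate bulks and XDP_XMIT_FLUSH for the final one, so the doorbell write is batched instead of issued per call.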

File tree

1 file changed: kernel/bpf/devmap.c (+6, -13 lines)
kernel/bpf/devmap.c

@@ -217,7 +217,7 @@ void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
 }
 
 static int bq_xmit_all(struct bpf_dtab_netdev *obj,
-		       struct xdp_bulk_queue *bq)
+		       struct xdp_bulk_queue *bq, u32 flags)
 {
 	struct net_device *dev = obj->dev;
 	int sent = 0, drops = 0, err = 0;
@@ -232,7 +232,7 @@ static int bq_xmit_all(struct bpf_dtab_netdev *obj,
 		prefetch(xdpf);
 	}
 
-	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, 0);
+	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
 	if (sent < 0) {
 		err = sent;
 		sent = 0;
@@ -276,7 +276,6 @@ void __dev_map_flush(struct bpf_map *map)
 	for_each_set_bit(bit, bitmap, map->max_entries) {
 		struct bpf_dtab_netdev *dev = READ_ONCE(dtab->netdev_map[bit]);
 		struct xdp_bulk_queue *bq;
-		struct net_device *netdev;
 
 		/* This is possible if the dev entry is removed by user space
 		 * between xdp redirect and flush op.
@@ -287,10 +286,7 @@ void __dev_map_flush(struct bpf_map *map)
 		__clear_bit(bit, bitmap);
 
 		bq = this_cpu_ptr(dev->bulkq);
-		bq_xmit_all(dev, bq);
-		netdev = dev->dev;
-		if (likely(netdev->netdev_ops->ndo_xdp_flush))
-			netdev->netdev_ops->ndo_xdp_flush(netdev);
+		bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
 	}
 }
 
@@ -320,7 +316,7 @@ static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
 	struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);
 
 	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
-		bq_xmit_all(obj, bq);
+		bq_xmit_all(obj, bq, 0);
 
 	/* Ingress dev_rx will be the same for all xdp_frame's in
 	 * bulk_queue, because bq stored per-CPU and must be flushed
@@ -359,8 +355,7 @@ static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
 
 static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
 {
-	if (dev->dev->netdev_ops->ndo_xdp_flush) {
-		struct net_device *fl = dev->dev;
+	if (dev->dev->netdev_ops->ndo_xdp_xmit) {
 		struct xdp_bulk_queue *bq;
 		unsigned long *bitmap;
 
@@ -371,9 +366,7 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
 			__clear_bit(dev->bit, bitmap);
 
 			bq = per_cpu_ptr(dev->bulkq, cpu);
-			bq_xmit_all(dev, bq);
-
-			fl->netdev_ops->ndo_xdp_flush(dev->dev);
+			bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
 		}
 	}
 }
