@@ -53,6 +53,8 @@
 #include <linux/dma-mapping.h>
 #include <linux/sort.h>
 #include <linux/phy_fixed.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
 #include <soc/fsl/bman.h>
 #include <soc/fsl/qman.h>
 #include "fman.h"
@@ -177,7 +179,7 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
 #define DPAA_HWA_SIZE (DPAA_PARSE_RESULTS_SIZE + DPAA_TIME_STAMP_SIZE \
                        + DPAA_HASH_RESULTS_SIZE)
 #define DPAA_RX_PRIV_DATA_DEFAULT_SIZE (DPAA_TX_PRIV_DATA_SIZE + \
-                                        dpaa_rx_extra_headroom)
+                                        XDP_PACKET_HEADROOM - DPAA_HWA_SIZE)
 #ifdef CONFIG_DPAA_ERRATUM_A050385
 #define DPAA_RX_PRIV_DATA_A050385_SIZE (DPAA_A050385_ALIGN - DPAA_HWA_SIZE)
 #define DPAA_RX_PRIV_DATA_SIZE (fman_has_errata_a050385() ? \
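The new default encodes a headroom budget: the Rx private data and the hardware
annotation area (parse results, timestamp, hash) together now span
DPAA_TX_PRIV_DATA_SIZE + XDP_PACKET_HEADROOM bytes, so at least
XDP_PACKET_HEADROOM (256) writable bytes sit in front of the frame for XDP.
A minimal sketch of that arithmetic, with hypothetical sizes standing in for
the real DPAA_* values:

    #include <stdio.h>

    /* Hypothetical sizes for illustration only; the driver reads the real
     * values from its DPAA_* macros. */
    #define TX_PRIV_DATA_SIZE   16
    #define HWA_SIZE            (32 + 8 + 8) /* parse + timestamp + hash */
    #define XDP_PACKET_HEADROOM 256          /* kernel constant */

    int main(void)
    {
            int rx_priv = TX_PRIV_DATA_SIZE + XDP_PACKET_HEADROOM - HWA_SIZE;

            /* rx_priv + HWA_SIZE == TX_PRIV_DATA_SIZE + XDP_PACKET_HEADROOM,
             * so the XDP headroom requirement is always met */
            printf("rx_priv=%d total=%d\n", rx_priv, rx_priv + HWA_SIZE);
            return 0;
    }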
@@ -1733,7 +1735,6 @@ static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
                   SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
         if (WARN_ONCE(!skb, "Build skb failure on Rx\n"))
                 goto free_buffer;
-        WARN_ON(fd_off != priv->rx_headroom);
         skb_reserve(skb, fd_off);
         skb_put(skb, qm_fd_get_length(fd));
 
@@ -2349,25 +2350,77 @@ static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal,
         return qman_cb_dqrr_consume;
 }
 
+static u32 dpaa_run_xdp(struct dpaa_priv *priv, struct qm_fd *fd, void *vaddr,
+                        unsigned int *xdp_meta_len)
+{
+        ssize_t fd_off = qm_fd_get_offset(fd);
+        struct bpf_prog *xdp_prog;
+        struct xdp_buff xdp;
+        u32 xdp_act;
+
+        rcu_read_lock();
+
+        xdp_prog = READ_ONCE(priv->xdp_prog);
+        if (!xdp_prog) {
+                rcu_read_unlock();
+                return XDP_PASS;
+        }
+
+        xdp.data = vaddr + fd_off;
+        xdp.data_meta = xdp.data;
+        xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
+        xdp.data_end = xdp.data + qm_fd_get_length(fd);
+        xdp.frame_sz = DPAA_BP_RAW_SIZE;
+
+        xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
+
+        /* Update the length and the offset of the FD */
+        qm_fd_set_contig(fd, xdp.data - vaddr, xdp.data_end - xdp.data);
+
+        switch (xdp_act) {
+        case XDP_PASS:
+                *xdp_meta_len = xdp.data - xdp.data_meta;
+                break;
+        default:
+                bpf_warn_invalid_xdp_action(xdp_act);
+                fallthrough;
+        case XDP_ABORTED:
+                trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
+                fallthrough;
+        case XDP_DROP:
+                /* Free the buffer */
+                free_pages((unsigned long)vaddr, 0);
+                break;
+        }
+
+        rcu_read_unlock();
+
+        return xdp_act;
+}
+
 static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
                                                 struct qman_fq *fq,
                                                 const struct qm_dqrr_entry *dq,
                                                 bool sched_napi)
 {
+        bool ts_valid = false, hash_valid = false;
         struct skb_shared_hwtstamps *shhwtstamps;
+        unsigned int skb_len, xdp_meta_len = 0;
         struct rtnl_link_stats64 *percpu_stats;
         struct dpaa_percpu_priv *percpu_priv;
         const struct qm_fd *fd = &dq->fd;
         dma_addr_t addr = qm_fd_addr(fd);
         enum qm_fd_format fd_format;
         struct net_device *net_dev;
         u32 fd_status, hash_offset;
+        struct qm_sg_entry *sgt;
         struct dpaa_bp *dpaa_bp;
         struct dpaa_priv *priv;
-        unsigned int skb_len;
         struct sk_buff *skb;
         int *count_ptr;
+        u32 xdp_act;
         void *vaddr;
+        u32 hash;
         u64 ns;
 
         fd_status = be32_to_cpu(fd->status);
@@ -2423,35 +2476,67 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
         count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
         (*count_ptr)--;
 
-        if (likely(fd_format == qm_fd_contig))
+        /* Extract the timestamp stored in the headroom before running XDP */
+        if (priv->rx_tstamp) {
+                if (!fman_port_get_tstamp(priv->mac_dev->port[RX], vaddr, &ns))
+                        ts_valid = true;
+                else
+                        WARN_ONCE(1, "fman_port_get_tstamp failed!\n");
+        }
+
+        /* Extract the hash stored in the headroom before running XDP */
+        if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use &&
+            !fman_port_get_hash_result_offset(priv->mac_dev->port[RX],
+                                              &hash_offset)) {
+                hash = be32_to_cpu(*(u32 *)(vaddr + hash_offset));
+                hash_valid = true;
+        }
+
+        if (likely(fd_format == qm_fd_contig)) {
+                xdp_act = dpaa_run_xdp(priv, (struct qm_fd *)fd, vaddr,
+                                       &xdp_meta_len);
+                if (xdp_act != XDP_PASS) {
+                        percpu_stats->rx_packets++;
+                        percpu_stats->rx_bytes += qm_fd_get_length(fd);
+                        return qman_cb_dqrr_consume;
+                }
                 skb = contig_fd_to_skb(priv, fd);
-        else
+        } else {
+                /* XDP doesn't support S/G frames. Return the fragments to the
+                 * buffer pool and release the SGT.
+                 */
+                if (READ_ONCE(priv->xdp_prog)) {
+                        WARN_ONCE(1, "S/G frames not supported under XDP\n");
+                        sgt = vaddr + qm_fd_get_offset(fd);
+                        dpaa_release_sgt_members(sgt);
+                        free_pages((unsigned long)vaddr, 0);
+                        return qman_cb_dqrr_consume;
+                }
                 skb = sg_fd_to_skb(priv, fd);
+        }
         if (!skb)
                 return qman_cb_dqrr_consume;
 
-        if (priv->rx_tstamp) {
+        if (xdp_meta_len)
+                skb_metadata_set(skb, xdp_meta_len);
+
+        /* Set the previously extracted timestamp */
+        if (ts_valid) {
                 shhwtstamps = skb_hwtstamps(skb);
                 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
-
-                if (!fman_port_get_tstamp(priv->mac_dev->port[RX], vaddr, &ns))
-                        shhwtstamps->hwtstamp = ns_to_ktime(ns);
-                else
-                        dev_warn(net_dev->dev.parent, "fman_port_get_tstamp failed!\n");
+                shhwtstamps->hwtstamp = ns_to_ktime(ns);
         }
 
         skb->protocol = eth_type_trans(skb, net_dev);
 
-        if (net_dev->features & NETIF_F_RXHASH && priv->keygen_in_use &&
-            !fman_port_get_hash_result_offset(priv->mac_dev->port[RX],
-                                              &hash_offset)) {
+        /* Set the previously extracted hash */
+        if (hash_valid) {
                 enum pkt_hash_types type;
 
                 /* if L4 exists, it was used in the hash generation */
                 type = be32_to_cpu(fd->status) & FM_FD_STAT_L4CV ?
                         PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
-                skb_set_hash(skb, be32_to_cpu(*(u32 *)(vaddr + hash_offset)),
-                             type);
+                skb_set_hash(skb, hash, type);
         }
 
         skb_len = skb->len;
@@ -2671,6 +2756,55 @@ static int dpaa_eth_stop(struct net_device *net_dev)
         return err;
 }
 
+static int dpaa_setup_xdp(struct net_device *net_dev, struct netdev_bpf *bpf)
+{
+        struct dpaa_priv *priv = netdev_priv(net_dev);
+        struct bpf_prog *old_prog;
+        int err, max_contig_data;
+        bool up;
+
+        max_contig_data = priv->dpaa_bp->size - priv->rx_headroom;
+
+        /* S/G fragments are not supported in XDP-mode */
+        if (bpf->prog &&
+            (net_dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN > max_contig_data)) {
+                NL_SET_ERR_MSG_MOD(bpf->extack, "MTU too large for XDP");
+                dev_warn(net_dev->dev.parent,
+                         "The maximum MTU for XDP is %d\n",
+                         max_contig_data - VLAN_ETH_HLEN - ETH_FCS_LEN);
+                return -EINVAL;
+        }
+
+        up = netif_running(net_dev);
+
+        if (up)
+                dpaa_eth_stop(net_dev);
+
+        old_prog = xchg(&priv->xdp_prog, bpf->prog);
+        if (old_prog)
+                bpf_prog_put(old_prog);
+
+        if (up) {
+                err = dpaa_open(net_dev);
+                if (err) {
+                        NL_SET_ERR_MSG_MOD(bpf->extack, "dpaa_open() failed");
+                        return err;
+                }
+        }
+
+        return 0;
+}
+
+static int dpaa_xdp(struct net_device *net_dev, struct netdev_bpf *xdp)
+{
+        switch (xdp->command) {
+        case XDP_SETUP_PROG:
+                return dpaa_setup_xdp(net_dev, xdp);
+        default:
+                return -EINVAL;
+        }
+}
+
 static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
         struct dpaa_priv *priv = netdev_priv(dev);
@@ -2737,6 +2871,7 @@ static const struct net_device_ops dpaa_ops = {
         .ndo_set_rx_mode = dpaa_set_rx_mode,
         .ndo_do_ioctl = dpaa_ioctl,
         .ndo_setup_tc = dpaa_setup_tc,
+        .ndo_bpf = dpaa_xdp,
 };
 
 static int dpaa_napi_add(struct net_device *net_dev)