From: Alex Liu <liualex@fb.com>
Date: Thu, 20 Jan 2022 11:34:59 -0800
Subject: net/mlx5e: Add support for using xdp->data_meta
Patch-mainline: v5.18-rc1
Git-commit: c1e80bf4ad3e91dd21a8ccdee129052e25ace7e6
References: jsc#PED-1549
Add support for using xdp->data_meta for cross-program communication.

To enable the metadata area, pass "true" as the last argument of
xdp_prepare_buff(). After the SKB is built, call skb_metadata_set()
if an XDP program pushed any metadata.
Signed-off-by: Alex Liu <liualex@fb.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 17 ++++++++++++-----
1 file changed, 12 insertions(+), 5 deletions(-)
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1490,7 +1490,7 @@ static inline void mlx5e_complete_rx_cqe
static inline
struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
u32 frag_size, u16 headroom,
- u32 cqe_bcnt)
+ u32 cqe_bcnt, u32 metasize)
{
struct sk_buff *skb = build_skb(va, frag_size);
@@ -1502,6 +1502,9 @@ struct sk_buff *mlx5e_build_linear_skb(s
skb_reserve(skb, headroom);
skb_put(skb, cqe_bcnt);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
+
return skb;
}
@@ -1509,7 +1512,7 @@ static void mlx5e_fill_xdp_buff(struct m
u32 len, struct xdp_buff *xdp)
{
xdp_init_buff(xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
- xdp_prepare_buff(xdp, va, headroom, len, false);
+ xdp_prepare_buff(xdp, va, headroom, len, true);
}
static struct sk_buff *
@@ -1522,6 +1525,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_r
struct sk_buff *skb;
void *va, *data;
u32 frag_size;
+ u32 metasize;
va = page_address(di->page) + wi->offset;
data = va + rx_headroom;
@@ -1538,7 +1542,8 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_r
rx_headroom = xdp.data - xdp.data_hard_start;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
- skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt);
+ metasize = xdp.data - xdp.data_meta;
+ skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
if (unlikely(!skb))
return NULL;
@@ -1837,6 +1842,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct m
struct sk_buff *skb;
void *va, *data;
u32 frag_size;
+ u32 metasize;
/* Check packet size. Note LRO doesn't use linear SKB */
if (unlikely(cqe_bcnt > rq->hw_mtu)) {
@@ -1862,7 +1868,8 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct m
rx_headroom = xdp.data - xdp.data_hard_start;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
- skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32);
+ metasize = xdp.data - xdp.data_meta;
+ skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32, metasize);
if (unlikely(!skb))
return NULL;
@@ -1893,7 +1900,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_r
dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, DMA_FROM_DEVICE);
prefetchw(hdr);
prefetch(data);
- skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size);
+ skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0);
if (unlikely(!skb))
return NULL;