From: Maxim Mikityanskiy <maximmi@nvidia.com>
Date: Fri, 28 Jan 2022 16:43:14 +0200
Subject: net/mlx5e: Move mlx5e_xdpi_fifo_push out of xmit_xdp_frame
Patch-mainline: v5.18-rc1
Git-commit: 49529a1726850bddd7a991de6e8ef52c65c49fbe
References: jsc#PED-1549

The implementations of xmit_xdp_frame get the xdpi parameter of type
struct mlx5e_xdp_info for the sole purpose of calling
mlx5e_xdpi_fifo_push() on success.

This commit moves the call out of xmit_xdp_frame, shifting that
responsibility to the caller. It will allow more fine-grained handling
of XDP info in cases where an xdp_frame is fragmented.
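
After this change, the call sites follow roughly the pattern below (a
minimal sketch based on the hunks in this patch; the check_result
argument and the error path differ per caller):

	ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
			      mlx5e_xmit_xdp_frame, sq, &xdptxd, 0);
	if (likely(ret))
		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
	/* else: caller-specific error handling (DMA unmap, error completion, ...) */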

Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/net/ethernet/mellanox/mlx5/core/en.h        |    1 -
 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c    |   17 ++++++++++-------
 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h    |    2 --
 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c |    4 +++-
 4 files changed, 13 insertions(+), 11 deletions(-)

--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -537,7 +537,6 @@ struct mlx5e_xdpsq;
 typedef int (*mlx5e_fp_xmit_xdp_frame_check)(struct mlx5e_xdpsq *);
 typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq *,
 					struct mlx5e_xmit_data *,
-					struct mlx5e_xdp_info *,
 					int);
 
 struct mlx5e_xdpsq {
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -119,8 +119,12 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *
 		xdpi.page.page = page;
 	}
 
-	return INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
-			       mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, 0);
+	if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
+				      mlx5e_xmit_xdp_frame, sq, &xdptxd, 0)))
+		return false;
+
+	mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
+	return true;
 }
 
 /* returns true if packet was consumed by xdp */
@@ -261,7 +265,7 @@ INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_x
 
 INDIRECT_CALLABLE_SCOPE bool
 mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
-			   struct mlx5e_xdp_info *xdpi, int check_result)
+			   int check_result)
 {
 	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
 	struct mlx5e_xdpsq_stats *stats = sq->stats;
@@ -289,7 +293,6 @@ mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_
 	if (unlikely(mlx5e_xdp_mpqwe_is_full(session, sq->max_sq_mpw_wqebbs)))
 		mlx5e_xdp_mpwqe_complete(sq);
 
-	mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
 	stats->xmit++;
 	return true;
 }
@@ -308,7 +311,7 @@ INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_x
 
 INDIRECT_CALLABLE_SCOPE bool
 mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
-		     struct mlx5e_xdp_info *xdpi, int check_result)
+		     int check_result)
 {
 	struct mlx5_wq_cyc       *wq   = &sq->wq;
 	u16                       pi   = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
@@ -358,7 +361,6 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq
 
 	sq->doorbell_cseg = cseg;
 
-	mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
 	stats->xmit++;
 	return true;
 }
@@ -537,12 +539,13 @@ int mlx5e_xdp_xmit(struct net_device *de
 		xdpi.frame.dma_addr = xdptxd.dma_addr;
 
 		ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
-				      mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, 0);
+				      mlx5e_xmit_xdp_frame, sq, &xdptxd, 0);
 		if (unlikely(!ret)) {
 			dma_unmap_single(sq->pdev, xdptxd.dma_addr,
 					 xdptxd.len, DMA_TO_DEVICE);
 			break;
 		}
+		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
 		nxmit++;
 	}
 
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -59,11 +59,9 @@ int mlx5e_xdp_xmit(struct net_device *de
 
 INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
 							  struct mlx5e_xmit_data *xdptxd,
-							  struct mlx5e_xdp_info *xdpi,
 							  int check_result));
 INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq,
 						    struct mlx5e_xmit_data *xdptxd,
-						    struct mlx5e_xdp_info *xdpi,
 						    int check_result));
 INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq));
 INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq));
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -103,12 +103,14 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq
 		xsk_buff_raw_dma_sync_for_device(pool, xdptxd.dma_addr, xdptxd.len);
 
 		ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
-				      mlx5e_xmit_xdp_frame, sq, &xdptxd, &xdpi, check_result);
+				      mlx5e_xmit_xdp_frame, sq, &xdptxd, check_result);
 		if (unlikely(!ret)) {
 			if (sq->mpwqe.wqe)
 				mlx5e_xdp_mpwqe_complete(sq);
 
 			mlx5e_xsk_tx_post_err(sq, &xdpi);
+		} else {
+			mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
 		}
 
 		flush = true;