From: Tariq Toukan <tariqt@mellanox.com>
Date: Wed, 18 Sep 2019 13:50:32 +0300
Subject: net/mlx5e: kTLS, Save only the frag page to release at completion
Patch-mainline: v5.4-rc6
Git-commit: f45da3716fb2fb09e301a1b6edf200ff343dc06e
References: jsc#SLE-8464

In TX resync flow where DUMP WQEs are posted, keep a pointer to
the fragment page to unref it upon completion, instead of saving
the whole fragment.

In addition, move it to the end of the argument list in tx_fill_wi().
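
A minimal sketch of the resulting reference-counting pattern (illustrative
only; it assumes the kernel's skb_frag_page()/get_page()/put_page() helpers
and the wqe_info layout introduced by this patch):

    /* post side: take a reference on the fragment's page and
     * record only the page pointer for the completion handler
     */
    struct page *page = skb_frag_page(frag);

    get_page(page);
    wi->resync_dump_frag_page = page;

    /* completion side: drop the reference taken at post time */
    if (wi->resync_dump_frag_page)
            put_page(wi->resync_dump_frag_page);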

Fixes: d2ead1f360e8 ("net/mlx5e: Add kTLS TX HW offload support")
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Reviewed-by: Eran Ben Elisha <eranbe@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/net/ethernet/mellanox/mlx5/core/en.h               |    2 
 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c |   27 ++++++-------
 2 files changed, 14 insertions(+), 15 deletions(-)

--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -345,7 +345,7 @@ struct mlx5e_tx_wqe_info {
 	u8  num_wqebbs;
 	u8  num_dma;
 #ifdef CONFIG_MLX5_EN_TLS
-	skb_frag_t *resync_dump_frag;
+	struct page *resync_dump_frag_page;
 #endif
 };
 
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -108,16 +108,15 @@ build_progress_params(struct mlx5e_tx_wq
 }
 
 static void tx_fill_wi(struct mlx5e_txqsq *sq,
-		       u16 pi, u8 num_wqebbs,
-		       skb_frag_t *resync_dump_frag,
-		       u32 num_bytes)
+		       u16 pi, u8 num_wqebbs, u32 num_bytes,
+		       struct page *page)
 {
 	struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
 
-	wi->skb              = NULL;
-	wi->num_wqebbs       = num_wqebbs;
-	wi->resync_dump_frag = resync_dump_frag;
-	wi->num_bytes        = num_bytes;
+	memset(wi, 0, sizeof(*wi));
+	wi->num_wqebbs = num_wqebbs;
+	wi->num_bytes  = num_bytes;
+	wi->resync_dump_frag_page = page;
 }
 
 void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
@@ -145,7 +144,7 @@ post_static_params(struct mlx5e_txqsq *s
 
 	umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi);
 	build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);
-	tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL, 0);
+	tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, 0, NULL);
 	sq->pc += MLX5E_KTLS_STATIC_WQEBBS;
 }
 
@@ -159,7 +158,7 @@ post_progress_params(struct mlx5e_txqsq
 
 	wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi);
 	build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);
-	tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL, 0);
+	tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, 0, NULL);
 	sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS;
 }
 
@@ -211,7 +210,7 @@ static bool tx_sync_info_get(struct mlx5
 	while (remaining > 0) {
 		skb_frag_t *frag = &record->frags[i];
 
-		__skb_frag_ref(frag);
+		get_page(skb_frag_page(frag));
 		remaining -= skb_frag_size(frag);
 		info->frags[i++] = frag;
 	}
@@ -284,7 +283,7 @@ tx_post_resync_dump(struct mlx5e_txqsq *
 	dseg->byte_count = cpu_to_be32(fsz);
 	mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
 
-	tx_fill_wi(sq, pi, MLX5E_KTLS_DUMP_WQEBBS, frag, fsz);
+	tx_fill_wi(sq, pi, MLX5E_KTLS_DUMP_WQEBBS, fsz, skb_frag_page(frag));
 	sq->pc += MLX5E_KTLS_DUMP_WQEBBS;
 
 	return 0;
@@ -297,14 +296,14 @@ void mlx5e_ktls_tx_handle_resync_dump_co
 	struct mlx5e_sq_stats *stats;
 	struct mlx5e_sq_dma *dma;
 
-	if (!wi->resync_dump_frag)
+	if (!wi->resync_dump_frag_page)
 		return;
 
 	dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);
 	stats = sq->stats;
 
 	mlx5e_tx_dma_unmap(sq->pdev, dma);
-	__skb_frag_unref(wi->resync_dump_frag);
+	put_page(wi->resync_dump_frag_page);
 	stats->tls_dump_packets++;
 	stats->tls_dump_bytes += wi->num_bytes;
 }
@@ -314,7 +313,7 @@ static void tx_post_fence_nop(struct mlx
 	struct mlx5_wq_cyc *wq = &sq->wq;
 	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
 
-	tx_fill_wi(sq, pi, 1, NULL, 0);
+	tx_fill_wi(sq, pi, 1, 0, NULL);
 
 	mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
 }