From: Aharon Landau <aharonl@nvidia.com>
Date: Tue, 12 Apr 2022 10:24:02 +0300
Subject: RDMA/mlx5: Use mlx5_umr_post_send_wait() to revoke MRs
Patch-mainline: v5.19-rc1
Git-commit: 33e8aa8e049811de87cd1c16a2ead85e0c9f9606
References: jsc#PED-1552

Move the revoke_mr logic to umr.c, and use mlx5_umr_post_send_wait()
instead of mlx5_ib_post_send_wait().
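For reference, the old and new entry points compare roughly as follows
(the mlx5r_umr_post_send_wait() signature is taken from the earlier
patches in this series and repeated here only for context):

  /* old: takes a fully built mlx5_umr_wr */
  int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
                             struct mlx5_umr_wr *umrwr);

  /* new: takes the mkey plus a raw UMR WQE; the final flag indicates
   * whether the WQE carries a trailing data segment (a revoke has
   * none, hence the "false" in the hunk below). */
  static int mlx5r_umr_post_send_wait(struct mlx5_ib_dev *dev, u32 mkey,
                                      struct mlx5r_umr_wqe *wqe,
                                      bool with_data);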

The new implementation does not zero out the access flags; before the
MR is reused, it will be updated with the required access flags.
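
The two mask helpers called by the new mlx5r_umr_revoke_mr() each set a
single mkey-mask bit, which is why the access-flag bits are left
untouched. A simplified sketch of their umr.c definitions (see umr.c
for the exact bodies):

  static __be64 get_umr_update_pd_mask(void)
  {
          return cpu_to_be64(MLX5_MKEY_MASK_PD);
  }

  static __be64 get_umr_disable_mr_mask(void)
  {
          return cpu_to_be64(MLX5_MKEY_MASK_FREE);
  }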

Link: https://lore.kernel.org/r/63717dfdaf6007f81b3e6dbf598f5bf3875ce86f.1649747695.git.leonro@nvidia.com
Signed-off-by: Aharon Landau <aharonl@nvidia.com>
Reviewed-by: Michael Guralnik <michaelgur@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/infiniband/hw/mlx5/mr.c  |   31 +++----------------------------
 drivers/infiniband/hw/mlx5/umr.c |   29 +++++++++++++++++++++++++++++
 drivers/infiniband/hw/mlx5/umr.h |    2 ++
 3 files changed, 34 insertions(+), 28 deletions(-)

--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1632,31 +1632,6 @@ err_dereg_mr:
 	return ERR_PTR(err);
 }
 
-/**
- * revoke_mr - Fence all DMA on the MR
- * @mr: The MR to fence
- *
- * Upon return the NIC will not be doing any DMA to the pages under the MR,
- * and any DMA in progress will be completed. Failure of this function
- * indicates the HW has failed catastrophically.
- */
-static int revoke_mr(struct mlx5_ib_mr *mr)
-{
-	struct mlx5_umr_wr umrwr = {};
-
-	if (mr_to_mdev(mr)->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
-		return 0;
-
-	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
-			      MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
-	umrwr.wr.opcode = MLX5_IB_WR_UMR;
-	umrwr.pd = mr_to_mdev(mr)->umrc.pd;
-	umrwr.mkey = mr->mmkey.key;
-	umrwr.ignore_free_state = 1;
-
-	return mlx5_ib_post_send_wait(mr_to_mdev(mr), &umrwr);
-}
-
 /*
  * True if the change in access flags can be done via UMR, only some access
  * flags can be updated.
@@ -1733,7 +1708,7 @@ static int umr_rereg_pas(struct mlx5_ib_
 	 * with it. This ensure the change is atomic relative to any use of the
 	 * MR.
 	 */
-	err = revoke_mr(mr);
+	err = mlx5r_umr_revoke_mr(mr);
 	if (err)
 		return err;
 
@@ -1811,7 +1786,7 @@ struct ib_mr *mlx5_ib_rereg_user_mr(stru
 		 * Only one active MR can refer to a umem at one time, revoke
 		 * the old MR before assigning the umem to the new one.
 		 */
-		err = revoke_mr(mr);
+		err = mlx5r_umr_revoke_mr(mr);
 		if (err)
 			return ERR_PTR(err);
 		umem = mr->umem;
@@ -1956,7 +1931,7 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr,
 
 	/* Stop DMA */
 	if (mr->cache_ent) {
-		if (revoke_mr(mr)) {
+		if (mlx5r_umr_revoke_mr(mr)) {
 			spin_lock_irq(&mr->cache_ent->lock);
 			mr->cache_ent->total_mrs--;
 			spin_unlock_irq(&mr->cache_ent->lock);
--- a/drivers/infiniband/hw/mlx5/umr.c
+++ b/drivers/infiniband/hw/mlx5/umr.c
@@ -320,3 +320,32 @@ static int mlx5r_umr_post_send_wait(stru
 	up(&umrc->sem);
 	return err;
 }
+
+/**
+ * mlx5r_umr_revoke_mr - Fence all DMA on the MR
+ * @mr: The MR to fence
+ *
+ * Upon return the NIC will not be doing any DMA to the pages under the MR,
+ * and any DMA in progress will be completed. Failure of this function
+ * indicates the HW has failed catastrophically.
+ */
+int mlx5r_umr_revoke_mr(struct mlx5_ib_mr *mr)
+{
+	struct mlx5_ib_dev *dev = mr_to_mdev(mr);
+	struct mlx5r_umr_wqe wqe = {};
+
+	if (dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+		return 0;
+
+	wqe.ctrl_seg.mkey_mask |= get_umr_update_pd_mask();
+	wqe.ctrl_seg.mkey_mask |= get_umr_disable_mr_mask();
+	wqe.ctrl_seg.flags |= MLX5_UMR_INLINE;
+
+	MLX5_SET(mkc, &wqe.mkey_seg, free, 1);
+	MLX5_SET(mkc, &wqe.mkey_seg, pd, to_mpd(dev->umrc.pd)->pdn);
+	MLX5_SET(mkc, &wqe.mkey_seg, qpn, 0xffffff);
+	MLX5_SET(mkc, &wqe.mkey_seg, mkey_7_0,
+		 mlx5_mkey_variant(mr->mmkey.key));
+
+	return mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, false);
+}
--- a/drivers/infiniband/hw/mlx5/umr.h
+++ b/drivers/infiniband/hw/mlx5/umr.h
@@ -91,4 +91,6 @@ struct mlx5r_umr_wqe {
 	struct mlx5_wqe_data_seg data_seg;
 };
 
+int mlx5r_umr_revoke_mr(struct mlx5_ib_mr *mr);
+
 #endif /* _MLX5_IB_UMR_H */
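
As the kernel-doc above notes, a failing revoke means DMA may still be
in flight, so a cached MR whose revoke failed must not be reused. A
condensed sketch of how mlx5_ib_dereg_mr() consumes the return value
(simplified from the mr.c hunk above; the cache_ent detach is assumed
from the surrounding upstream code):

  /* Stop DMA */
  if (mr->cache_ent) {
          if (mlx5r_umr_revoke_mr(mr)) {
                  /* Revoke failed: the MR cannot be reused, so drop
                   * it from the cache accounting and destroy it
                   * instead of returning it to the cache. */
                  spin_lock_irq(&mr->cache_ent->lock);
                  mr->cache_ent->total_mrs--;
                  spin_unlock_irq(&mr->cache_ent->lock);
                  mr->cache_ent = NULL;
          }
  }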