From: Aharon Landau <aharonl@nvidia.com>
Date: Tue, 12 Apr 2022 10:24:03 +0300
Subject: RDMA/mlx5: Use mlx5_umr_post_send_wait() to rereg pd access
Patch-mainline: v5.19-rc1
Git-commit: 483196764091621b8dd45d7af29e7a9c874a9f19
References: jsc#PED-1552

Move rereg_pd_access logic to umr.c, and use mlx5_umr_post_send_wait()
instead of mlx5_ib_post_send_wait().
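
For reference, the caller-side fast path after this change looks roughly
like the sketch below (illustrative only; rereg_fast_path_example() is a
made-up wrapper, while the helpers it calls are the ones added and used in
the hunks that follow):

  static int rereg_fast_path_example(struct mlx5_ib_mr *mr,
  				     struct ib_pd *new_pd,
  				     int new_access_flags)
  {
  	int err;

  	/*
  	 * mlx5r_umr_rereg_pd_access() builds an inline UMR WQE that
  	 * updates the mkey's PD and access bits, then posts it and
  	 * waits for its completion via mlx5r_umr_post_send_wait().
  	 */
  	err = mlx5r_umr_rereg_pd_access(mr, new_pd, new_access_flags);
  	if (err)
  		return err;

  	/* On success, mr->access_flags already holds new_access_flags. */
  	return 0;
  }
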
Link: https://lore.kernel.org/r/18da4f47edbc2561f652b7ee4e7a5269e866af77.1649747695.git.leonro@nvidia.com
Signed-off-by: Aharon Landau <aharonl@nvidia.com>
Reviewed-by: Michael Guralnik <michaelgur@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
drivers/infiniband/hw/mlx5/mr.c | 27 +------------------------
drivers/infiniband/hw/mlx5/umr.c | 41 +++++++++++++++++++++++++++++++++++++++
drivers/infiniband/hw/mlx5/umr.h | 2 +
3 files changed, 45 insertions(+), 25 deletions(-)
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1649,30 +1649,6 @@ static bool can_use_umr_rereg_access(str
target_access_flags);
}

-static int umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
- int access_flags)
-{
- struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
- struct mlx5_umr_wr umrwr = {
- .wr = {
- .send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
- MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS,
- .opcode = MLX5_IB_WR_UMR,
- },
- .mkey = mr->mmkey.key,
- .pd = pd,
- .access_flags = access_flags,
- };
- int err;
-
- err = mlx5_ib_post_send_wait(dev, &umrwr);
- if (err)
- return err;
-
- mr->access_flags = access_flags;
- return 0;
-}
-
static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
struct ib_umem *new_umem,
int new_access_flags, u64 iova,
@@ -1773,7 +1749,8 @@ struct ib_mr *mlx5_ib_rereg_user_mr(stru
/* Fast path for PD/access change */
if (can_use_umr_rereg_access(dev, mr->access_flags,
new_access_flags)) {
- err = umr_rereg_pd_access(mr, new_pd, new_access_flags);
+ err = mlx5r_umr_rereg_pd_access(mr, new_pd,
+ new_access_flags);
if (err)
return ERR_PTR(err);
return NULL;
--- a/drivers/infiniband/hw/mlx5/umr.c
+++ b/drivers/infiniband/hw/mlx5/umr.c
@@ -349,3 +349,44 @@ int mlx5r_umr_revoke_mr(struct mlx5_ib_m

	return mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, false);
}
+
+static void mlx5r_umr_set_access_flags(struct mlx5_ib_dev *dev,
+ struct mlx5_mkey_seg *seg,
+ unsigned int access_flags)
+{
+ MLX5_SET(mkc, seg, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
+ MLX5_SET(mkc, seg, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
+ MLX5_SET(mkc, seg, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
+ MLX5_SET(mkc, seg, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
+ MLX5_SET(mkc, seg, lr, 1);
+ MLX5_SET(mkc, seg, relaxed_ordering_write,
+ !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
+ MLX5_SET(mkc, seg, relaxed_ordering_read,
+ !!(access_flags & IB_ACCESS_RELAXED_ORDERING));
+}
+
+int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
+ int access_flags)
+{
+ struct mlx5_ib_dev *dev = mr_to_mdev(mr);
+ struct mlx5r_umr_wqe wqe = {};
+ int err;
+
+ wqe.ctrl_seg.mkey_mask = get_umr_update_access_mask(dev);
+ wqe.ctrl_seg.mkey_mask |= get_umr_update_pd_mask();
+ wqe.ctrl_seg.flags = MLX5_UMR_CHECK_FREE;
+ wqe.ctrl_seg.flags |= MLX5_UMR_INLINE;
+
+ mlx5r_umr_set_access_flags(dev, &wqe.mkey_seg, access_flags);
+ MLX5_SET(mkc, &wqe.mkey_seg, pd, to_mpd(pd)->pdn);
+ MLX5_SET(mkc, &wqe.mkey_seg, qpn, 0xffffff);
+ MLX5_SET(mkc, &wqe.mkey_seg, mkey_7_0,
+ mlx5_mkey_variant(mr->mmkey.key));
+
+ err = mlx5r_umr_post_send_wait(dev, mr->mmkey.key, &wqe, false);
+ if (err)
+ return err;
+
+ mr->access_flags = access_flags;
+ return 0;
+}
--- a/drivers/infiniband/hw/mlx5/umr.h
+++ b/drivers/infiniband/hw/mlx5/umr.h
@@ -92,5 +92,7 @@ struct mlx5r_umr_wqe {
};

int mlx5r_umr_revoke_mr(struct mlx5_ib_mr *mr);
+int mlx5r_umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
+			      int access_flags);

#endif /* _MLX5_IB_UMR_H */