From: Bob Pearson <rpearsonhpe@gmail.com>
Date: Tue, 8 Feb 2022 15:16:35 -0600
Subject: RDMA/rxe: Move mcg_lock to rxe
Patch-mainline: v5.18-rc1
Git-commit: 9fd0eb7c3c73c80a7bbe28dc71ae8ec5698a7e84
References: jsc#PED-1111

Replace mcg->mcg_lock and mc_grp_pool->pool_lock by rxe->mcg_lock.  This
is the first step of several intended to decouple the mc_grp and mc_elem
objects from the rxe pool code.

Link: https://lore.kernel.org/r/20220208211644.123457-2-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/infiniband/sw/rxe/rxe.c       |    2 ++
 drivers/infiniband/sw/rxe/rxe_mcast.c |   19 +++++++++----------
 drivers/infiniband/sw/rxe/rxe_recv.c  |    4 ++--
 drivers/infiniband/sw/rxe/rxe_verbs.h |    3 ++-
 4 files changed, 15 insertions(+), 13 deletions(-)

--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -211,6 +211,8 @@ static int rxe_init(struct rxe_dev *rxe)
 	spin_lock_init(&rxe->pending_lock);
 	INIT_LIST_HEAD(&rxe->pending_mmaps);
 
+	spin_lock_init(&rxe->mcg_lock);
+
 	mutex_init(&rxe->usdev_lock);
 
 	return 0;
--- a/drivers/infiniband/sw/rxe/rxe_mcast.c
+++ b/drivers/infiniband/sw/rxe/rxe_mcast.c
@@ -25,7 +25,7 @@ static int rxe_mcast_delete(struct rxe_d
 	return dev_mc_del(rxe->ndev, ll_addr);
 }
 
-/* caller should hold mc_grp_pool->pool_lock */
+/* caller should hold rxe->mcg_lock */
 static struct rxe_mcg *create_grp(struct rxe_dev *rxe,
 				     struct rxe_pool *pool,
 				     union ib_gid *mgid)
@@ -38,7 +38,6 @@ static struct rxe_mcg *create_grp(struct
 		return ERR_PTR(-ENOMEM);
 
 	INIT_LIST_HEAD(&grp->qp_list);
-	spin_lock_init(&grp->mcg_lock);
 	grp->rxe = rxe;
 	rxe_add_key_locked(grp, mgid);
 
@@ -63,7 +62,7 @@ static int rxe_mcast_get_grp(struct rxe_
 	if (rxe->attr.max_mcast_qp_attach == 0)
 		return -EINVAL;
 
-	write_lock_irqsave(&pool->pool_lock, flags);
+	spin_lock_irqsave(&rxe->mcg_lock, flags);
 
 	grp = rxe_pool_get_key_locked(pool, mgid);
 	if (grp)
@@ -71,13 +70,13 @@ static int rxe_mcast_get_grp(struct rxe_
 
 	grp = create_grp(rxe, pool, mgid);
 	if (IS_ERR(grp)) {
-		write_unlock_irqrestore(&pool->pool_lock, flags);
+		spin_unlock_irqrestore(&rxe->mcg_lock, flags);
 		err = PTR_ERR(grp);
 		return err;
 	}
 
 done:
-	write_unlock_irqrestore(&pool->pool_lock, flags);
+	spin_unlock_irqrestore(&rxe->mcg_lock, flags);
 	*grp_p = grp;
 	return 0;
 }
@@ -90,7 +89,7 @@ static int rxe_mcast_add_grp_elem(struct
 	unsigned long flags;
 
 	/* check to see of the qp is already a member of the group */
-	spin_lock_irqsave(&grp->mcg_lock, flags);
+	spin_lock_irqsave(&rxe->mcg_lock, flags);
 	list_for_each_entry(elem, &grp->qp_list, qp_list) {
 		if (elem->qp == qp) {
 			err = 0;
@@ -120,7 +119,7 @@ static int rxe_mcast_add_grp_elem(struct
 
 	err = 0;
 out:
-	spin_unlock_irqrestore(&grp->mcg_lock, flags);
+	spin_unlock_irqrestore(&rxe->mcg_lock, flags);
 	return err;
 }
 
@@ -135,7 +134,7 @@ static int rxe_mcast_drop_grp_elem(struc
 	if (!grp)
 		goto err1;
 
-	spin_lock_irqsave(&grp->mcg_lock, flags);
+	spin_lock_irqsave(&rxe->mcg_lock, flags);
 
 	list_for_each_entry_safe(elem, tmp, &grp->qp_list, qp_list) {
 		if (elem->qp == qp) {
@@ -143,7 +142,7 @@ static int rxe_mcast_drop_grp_elem(struc
 			grp->num_qp--;
 			atomic_dec(&qp->mcg_num);
 
-			spin_unlock_irqrestore(&grp->mcg_lock, flags);
+			spin_unlock_irqrestore(&rxe->mcg_lock, flags);
 			rxe_drop_ref(elem);
 			rxe_drop_ref(grp);	/* ref held by QP */
 			rxe_drop_ref(grp);	/* ref from get_key */
@@ -151,7 +150,7 @@ static int rxe_mcast_drop_grp_elem(struc
 		}
 	}
 
-	spin_unlock_irqrestore(&grp->mcg_lock, flags);
+	spin_unlock_irqrestore(&rxe->mcg_lock, flags);
 	rxe_drop_ref(grp);			/* ref from get_key */
 err1:
 	return -EINVAL;
--- a/drivers/infiniband/sw/rxe/rxe_recv.c
+++ b/drivers/infiniband/sw/rxe/rxe_recv.c
@@ -250,7 +250,7 @@ static void rxe_rcv_mcast_pkt(struct rxe
 	if (!mcg)
 		goto drop;	/* mcast group not registered */
 
-	spin_lock_bh(&mcg->mcg_lock);
+	spin_lock_bh(&rxe->mcg_lock);
 
 	/* this is unreliable datagram service so we let
 	 * failures to deliver a multicast packet to a
@@ -298,7 +298,7 @@ static void rxe_rcv_mcast_pkt(struct rxe
 		}
 	}
 
-	spin_unlock_bh(&mcg->mcg_lock);
+	spin_unlock_bh(&rxe->mcg_lock);
 
 	rxe_drop_ref(mcg);	/* drop ref from rxe_pool_get_key. */
 
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -353,7 +353,6 @@ struct rxe_mw {
 
 struct rxe_mcg {
 	struct rxe_pool_elem	elem;
-	spinlock_t		mcg_lock; /* guard group */
 	struct rxe_dev		*rxe;
 	struct list_head	qp_list;
 	union ib_gid		mgid;
@@ -399,6 +398,8 @@ struct rxe_dev {
 	struct rxe_pool		mc_grp_pool;
 	struct rxe_pool		mc_elem_pool;
 
+	spinlock_t		mcg_lock;
+
 	spinlock_t		pending_lock; /* guard pending_mmaps */
 	struct list_head	pending_mmaps;