From: Bryan Tan <bryantan@vmware.com>
Date: Wed, 20 Dec 2017 11:27:00 -0800
Subject: RDMA/vmw_pvrdma: Use refcount_t instead of atomic_t
Patch-mainline: v4.16-rc1
Git-commit: a61eb6136829173d51dd0c7f7248733025708eeb
References: bsc#1103992 FATE#326009

refcount_t is the preferred type for refcounts. Change the
QP and CQ refcnt fields to use refcount_t.
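
As a rough illustration only (not part of the patch), the resulting
get/put pattern looks like the sketch below; pvrdma_obj is a
hypothetical stand-in for struct pvrdma_qp / struct pvrdma_cq:

	#include <linux/refcount.h>
	#include <linux/completion.h>

	struct pvrdma_obj {
		refcount_t refcnt;
		struct completion free;
	};

	static void pvrdma_obj_init(struct pvrdma_obj *obj)
	{
		refcount_set(&obj->refcnt, 1);	/* creation holds the first reference */
		init_completion(&obj->free);
	}

	static void pvrdma_obj_get(struct pvrdma_obj *obj)
	{
		refcount_inc(&obj->refcnt);	/* e.g. while delivering an event */
	}

	static void pvrdma_obj_put(struct pvrdma_obj *obj)
	{
		if (refcount_dec_and_test(&obj->refcnt))
			complete(&obj->free);	/* last reference dropped */
	}

	static void pvrdma_obj_destroy(struct pvrdma_obj *obj)
	{
		pvrdma_obj_put(obj);			/* drop the creation reference */
		wait_for_completion(&obj->free);	/* wait for in-flight users to finish */
	}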

Reviewed-by: Adit Ranadive <aditr@vmware.com>
Reviewed-by: Aditya Sarwade <asarwade@vmware.com>
Reviewed-by: Jorgen Hansen <jhansen@vmware.com>
Signed-off-by: Bryan Tan <bryantan@vmware.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h      |    4 ++--
 drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c   |    4 ++--
 drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c |   12 ++++++------
 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c   |    4 ++--
 4 files changed, 12 insertions(+), 12 deletions(-)

--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -93,7 +93,7 @@ struct pvrdma_cq {
 	struct pvrdma_page_dir pdir;
 	u32 cq_handle;
 	bool is_kernel;
-	atomic_t refcnt;
+	refcount_t refcnt;
 	struct completion free;
 };
 
@@ -196,7 +196,7 @@ struct pvrdma_qp {
 	u8 state;
 	bool is_kernel;
 	struct mutex mutex; /* QP state mutex. */
-	atomic_t refcnt;
+	refcount_t refcnt;
 	struct completion free;
 };
 
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -177,7 +177,7 @@ struct ib_cq *pvrdma_create_cq(struct ib
 	else
 		pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0);
 
-	atomic_set(&cq->refcnt, 1);
+	refcount_set(&cq->refcnt, 1);
 	init_completion(&cq->free);
 	spin_lock_init(&cq->cq_lock);
 
@@ -229,7 +229,7 @@ err_cq:
 
 static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
 {
-	if (atomic_dec_and_test(&cq->refcnt))
+	if (refcount_dec_and_test(&cq->refcnt))
 		complete(&cq->free);
 	wait_for_completion(&cq->free);
 
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -333,7 +333,7 @@ static void pvrdma_qp_event(struct pvrdm
 	spin_lock_irqsave(&dev->qp_tbl_lock, flags);
 	qp = dev->qp_tbl[qpn % dev->dsr->caps.max_qp];
 	if (qp)
-		atomic_inc(&qp->refcnt);
+		refcount_inc(&qp->refcnt);
 	spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);
 
 	if (qp && qp->ibqp.event_handler) {
@@ -346,7 +346,7 @@ static void pvrdma_qp_event(struct pvrdm
 		ibqp->event_handler(&e, ibqp->qp_context);
 	}
 	if (qp) {
-		if (atomic_dec_and_test(&qp->refcnt))
+		if (refcount_dec_and_test(&qp->refcnt))
 			complete(&qp->free);
 	}
 }
@@ -359,7 +359,7 @@ static void pvrdma_cq_event(struct pvrdm
 	spin_lock_irqsave(&dev->cq_tbl_lock, flags);
 	cq = dev->cq_tbl[cqn % dev->dsr->caps.max_cq];
 	if (cq)
-		atomic_inc(&cq->refcnt);
+		refcount_inc(&cq->refcnt);
 	spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
 
 	if (cq && cq->ibcq.event_handler) {
@@ -372,7 +372,7 @@ static void pvrdma_cq_event(struct pvrdm
 		ibcq->event_handler(&e, ibcq->cq_context);
 	}
 	if (cq) {
-		if (atomic_dec_and_test(&cq->refcnt))
+		if (refcount_dec_and_test(&cq->refcnt))
 			complete(&cq->free);
 	}
 }
@@ -531,13 +531,13 @@ static irqreturn_t pvrdma_intrx_handler(
 		spin_lock_irqsave(&dev->cq_tbl_lock, flags);
 		cq = dev->cq_tbl[cqne->info % dev->dsr->caps.max_cq];
 		if (cq)
-			atomic_inc(&cq->refcnt);
+			refcount_inc(&cq->refcnt);
 		spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
 
 		if (cq && cq->ibcq.comp_handler)
 			cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
 		if (cq) {
-			if (atomic_dec_and_test(&cq->refcnt))
+			if (refcount_dec_and_test(&cq->refcnt))
 				complete(&cq->free);
 		}
 		pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -245,7 +245,7 @@ struct ib_qp *pvrdma_create_qp(struct ib
 		spin_lock_init(&qp->sq.lock);
 		spin_lock_init(&qp->rq.lock);
 		mutex_init(&qp->mutex);
-		atomic_set(&qp->refcnt, 1);
+		refcount_set(&qp->refcnt, 1);
 		init_completion(&qp->free);
 
 		qp->state = IB_QPS_RESET;
@@ -427,7 +427,7 @@ static void pvrdma_free_qp(struct pvrdma
 
 	pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
 
-	if (atomic_dec_and_test(&qp->refcnt))
+	if (refcount_dec_and_test(&qp->refcnt))
 		complete(&qp->free);
 	wait_for_completion(&qp->free);