From: Jason Gunthorpe <jgg@nvidia.com>
Date: Tue, 29 Sep 2020 21:24:35 -0300
Subject: RDMA/bnxt_re: Use rdma_umem_for_each_dma_block()
Patch-mainline: v5.10-rc1
Git-commit: 6ef999f500213284bb038f1ff0c402791c60e380
References: bsc#1175499

This driver is taking the SGL out of the umem and passing it through a
struct bnxt_qplib_sg_info. Instead of passing the SGL, pass the umem and
then use rdma_umem_for_each_dma_block() directly.
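
As a minimal sketch of the new pattern, a consumer walks the DMA-mapped
blocks straight off the umem; the umem, pgsize and pg_map_arr names here
just stand in for whatever state the caller carries:

	struct ib_block_iter biter;
	int i = 0;

	/* One iteration per pgsize-aligned DMA block in the umem. */
	rdma_umem_for_each_dma_block(umem, &biter, pgsize)
		pg_map_arr[i++] = rdma_block_iter_dma_address(&biter);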

Move the calls of ib_umem_num_dma_blocks() closer to their actual point of
use; npages is only set for non-umem pbl flows.
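
With the umem stored in the sg_info, the page count is computed at its
point of use instead of being cached up front; this is the shape of the
logic __alloc_pbl() gains below:

	/* umem-backed flows derive the count, kernel flows keep npages. */
	if (sginfo->umem)
		pages = ib_umem_num_dma_blocks(sginfo->umem, sginfo->pgsize);
	else
		pages = sginfo->npages;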

Link: https://lore.kernel.org/r/0-v1-b37437a73f35+49c-bnxt_re_dma_block_jgg@nvidia.com
Acked-by: Selvin Xavier <selvin.xavier@broadcom.com>
Tested-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/infiniband/hw/bnxt_re/ib_verbs.c  |   18 ++++--------------
 drivers/infiniband/hw/bnxt_re/qplib_res.c |   30 +++++++++++++++++-------------
 drivers/infiniband/hw/bnxt_re/qplib_res.h |    3 +--
 3 files changed, 22 insertions(+), 29 deletions(-)

--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -938,9 +938,7 @@ static int bnxt_re_init_user_qp(struct b
 		return PTR_ERR(umem);
 
 	qp->sumem = umem;
-	qplib_qp->sq.sg_info.sghead = umem->sg_head.sgl;
-	qplib_qp->sq.sg_info.npages = ib_umem_num_dma_blocks(umem, PAGE_SIZE);
-	qplib_qp->sq.sg_info.nmap = umem->nmap;
+	qplib_qp->sq.sg_info.umem = umem;
 	qplib_qp->sq.sg_info.pgsize = PAGE_SIZE;
 	qplib_qp->sq.sg_info.pgshft = PAGE_SHIFT;
 	qplib_qp->qp_handle = ureq.qp_handle;
@@ -953,10 +951,7 @@ static int bnxt_re_init_user_qp(struct b
 		if (IS_ERR(umem))
 			goto rqfail;
 		qp->rumem = umem;
-		qplib_qp->rq.sg_info.sghead = umem->sg_head.sgl;
-		qplib_qp->rq.sg_info.npages =
-			ib_umem_num_dma_blocks(umem, PAGE_SIZE);
-		qplib_qp->rq.sg_info.nmap = umem->nmap;
+		qplib_qp->rq.sg_info.umem = umem;
 		qplib_qp->rq.sg_info.pgsize = PAGE_SIZE;
 		qplib_qp->rq.sg_info.pgshft = PAGE_SHIFT;
 	}
@@ -1609,9 +1604,7 @@ static int bnxt_re_init_user_srq(struct
 		return PTR_ERR(umem);
 
 	srq->umem = umem;
-	qplib_srq->sg_info.sghead = umem->sg_head.sgl;
-	qplib_srq->sg_info.npages = ib_umem_num_dma_blocks(umem, PAGE_SIZE);
-	qplib_srq->sg_info.nmap = umem->nmap;
+	qplib_srq->sg_info.umem = umem;
 	qplib_srq->sg_info.pgsize = PAGE_SIZE;
 	qplib_srq->sg_info.pgshft = PAGE_SHIFT;
 	qplib_srq->srq_handle = ureq.srq_handle;
@@ -2861,10 +2854,7 @@ int bnxt_re_create_cq(struct ib_cq *ibcq
 			rc = PTR_ERR(cq->umem);
 			goto fail;
 		}
-		cq->qplib_cq.sg_info.sghead = cq->umem->sg_head.sgl;
-		cq->qplib_cq.sg_info.npages =
-			ib_umem_num_dma_blocks(cq->umem, PAGE_SIZE);
-		cq->qplib_cq.sg_info.nmap = cq->umem->nmap;
+		cq->qplib_cq.sg_info.umem = cq->umem;
 		cq->qplib_cq.dpi = &uctx->dpi;
 	} else {
 		cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL);
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -45,6 +45,9 @@
 #include <linux/dma-mapping.h>
 #include <linux/if_vlan.h>
 #include <linux/vmalloc.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_umem.h>
+
 #include "roce_hsi.h"
 #include "qplib_res.h"
 #include "qplib_sp.h"
@@ -87,12 +90,11 @@ static void __free_pbl(struct bnxt_qplib
 static void bnxt_qplib_fill_user_dma_pages(struct bnxt_qplib_pbl *pbl,
 					   struct bnxt_qplib_sg_info *sginfo)
 {
-	struct scatterlist *sghead = sginfo->sghead;
-	struct sg_dma_page_iter sg_iter;
+	struct ib_block_iter biter;
 	int i = 0;
 
-	for_each_sg_dma_page(sghead, &sg_iter, sginfo->nmap, 0) {
-		pbl->pg_map_arr[i] = sg_page_iter_dma_address(&sg_iter);
+	rdma_umem_for_each_dma_block(sginfo->umem, &biter, sginfo->pgsize) {
+		pbl->pg_map_arr[i] = rdma_block_iter_dma_address(&biter);
 		pbl->pg_arr[i] = NULL;
 		pbl->pg_count++;
 		i++;
@@ -104,15 +106,16 @@ static int __alloc_pbl(struct bnxt_qplib
 		       struct bnxt_qplib_sg_info *sginfo)
 {
 	struct pci_dev *pdev = res->pdev;
-	struct scatterlist *sghead;
 	bool is_umem = false;
 	u32 pages;
 	int i;
 
 	if (sginfo->nopte)
 		return 0;
-	pages = sginfo->npages;
-	sghead = sginfo->sghead;
+	if (sginfo->umem)
+		pages = ib_umem_num_dma_blocks(sginfo->umem, sginfo->pgsize);
+	else
+		pages = sginfo->npages;
 	/* page ptr arrays */
 	pbl->pg_arr = vmalloc(pages * sizeof(void *));
 	if (!pbl->pg_arr)
@@ -127,7 +130,7 @@ static int __alloc_pbl(struct bnxt_qplib
 	pbl->pg_count = 0;
 	pbl->pg_size = sginfo->pgsize;
 
-	if (!sghead) {
+	if (!sginfo->umem) {
 		for (i = 0; i < pages; i++) {
 			pbl->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
 							    pbl->pg_size,
@@ -183,14 +186,12 @@ int bnxt_qplib_alloc_init_hwq(struct bnx
 	struct bnxt_qplib_sg_info sginfo = {};
 	u32 depth, stride, npbl, npde;
 	dma_addr_t *src_phys_ptr, **dst_virt_ptr;
-	struct scatterlist *sghead = NULL;
 	struct bnxt_qplib_res *res;
 	struct pci_dev *pdev;
 	int i, rc, lvl;
 
 	res = hwq_attr->res;
 	pdev = res->pdev;
-	sghead = hwq_attr->sginfo->sghead;
 	pg_size = hwq_attr->sginfo->pgsize;
 	hwq->level = PBL_LVL_MAX;
 
@@ -204,7 +205,7 @@ int bnxt_qplib_alloc_init_hwq(struct bnx
 			aux_pages++;
 	}
 
-	if (!sghead) {
+	if (!hwq_attr->sginfo->umem) {
 		hwq->is_user = false;
 		npages = (depth * stride) / pg_size + aux_pages;
 		if ((depth * stride) % pg_size)
@@ -213,11 +214,14 @@ int bnxt_qplib_alloc_init_hwq(struct bnx
 			return -EINVAL;
 		hwq_attr->sginfo->npages = npages;
 	} else {
+		unsigned long sginfo_num_pages = ib_umem_num_dma_blocks(
+			hwq_attr->sginfo->umem, hwq_attr->sginfo->pgsize);
+
 		hwq->is_user = true;
-		npages = hwq_attr->sginfo->npages;
+		npages = sginfo_num_pages;
 		npages = (npages * PAGE_SIZE) /
 			  BIT_ULL(hwq_attr->sginfo->pgshft);
-		if ((hwq_attr->sginfo->npages * PAGE_SIZE) %
+		if ((sginfo_num_pages * PAGE_SIZE) %
 		     BIT_ULL(hwq_attr->sginfo->pgshft))
 			if (!npages)
 				npages++;
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -126,8 +126,7 @@ struct bnxt_qplib_pbl {
 };
 
 struct bnxt_qplib_sg_info {
-	struct scatterlist		*sghead;
-	u32				nmap;
+	struct ib_umem			*umem;
 	u32				npages;
 	u32				pgshft;
 	u32				pgsize;