From: Yangyang Li <liyangyang20@huawei.com>
Date: Thu, 19 Aug 2021 09:36:18 +0800
Subject: RDMA/hns: Use IDA interface to manage uar index
Patch-mainline: v5.15-rc1
Git-commit: 8feafd9017ba5b01c3ea256b59ac2c867a762659
References: bsc#1190336

Switch uar index allocation and release from hns' own bitmap interface to
the IDA interface.

Link: https://lore.kernel.org/r/1629336980-17499-2-git-send-email-liangwenpeng@huawei.com
Signed-off-by: Yangyang Li <liyangyang20@huawei.com>
Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/infiniband/hw/hns/hns_roce_alloc.c  |    2 -
 drivers/infiniband/hw/hns/hns_roce_device.h |    6 +----
 drivers/infiniband/hw/hns/hns_roce_main.c   |   14 ++++--------
 drivers/infiniband/hw/hns/hns_roce_pd.c     |   31 ++++++++++++----------------
 4 files changed, 22 insertions(+), 31 deletions(-)
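
For reference, the kernel IDA interface that replaces the driver-private
bitmap follows the pattern sketched below. This is a minimal illustration
under assumed names, not driver code: example_ida, EXAMPLE_MIN_ID,
EXAMPLE_MAX_ID and the example_* helpers are placeholders standing in for
hr_dev->uar_ida and its min/max bounds.

	#include <linux/idr.h>
	#include <linux/gfp.h>

	#define EXAMPLE_MIN_ID	1	/* placeholder: lowest allocatable ID */
	#define EXAMPLE_MAX_ID	255	/* placeholder: highest allocatable ID */

	static DEFINE_IDA(example_ida);	/* or ida_init() on a dynamic ida */

	/*
	 * Allocate the lowest free ID in [EXAMPLE_MIN_ID, EXAMPLE_MAX_ID].
	 * Returns the ID on success, or -ENOMEM/-ENOSPC on failure.
	 */
	static int example_alloc(void)
	{
		return ida_alloc_range(&example_ida, EXAMPLE_MIN_ID,
				       EXAMPLE_MAX_ID, GFP_KERNEL);
	}

	/* Hand a previously allocated ID back to the allocator. */
	static void example_free(int id)
	{
		ida_free(&example_ida, id);
	}

ida_destroy() releases any memory still held by the allocator; after this
patch, hns_roce_cleanup_bitmap() calls it on &hr_dev->uar_ida.ida, matching
the pd and mtpt IDAs converted earlier in the series.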

--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -253,5 +253,5 @@ void hns_roce_cleanup_bitmap(struct hns_
 	hns_roce_cleanup_cq_table(hr_dev);
 	ida_destroy(&hr_dev->mr_table.mtpt_ida.ida);
 	ida_destroy(&hr_dev->pd_ida.ida);
-	hns_roce_cleanup_uar_table(hr_dev);
+	ida_destroy(&hr_dev->uar_ida.ida);
 }
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -963,7 +963,7 @@ struct hns_roce_dev {
 	struct hns_roce_cmdq	cmd;
 	struct hns_roce_ida pd_ida;
 	struct hns_roce_ida xrcd_ida;
-	struct hns_roce_uar_table uar_table;
+	struct hns_roce_ida uar_ida;
 	struct hns_roce_mr_table  mr_table;
 	struct hns_roce_cq_table  cq_table;
 	struct hns_roce_srq_table srq_table;
@@ -1118,10 +1118,8 @@ static inline u8 get_tclass(const struct
 	       grh->traffic_class >> DSCP_SHIFT : grh->traffic_class;
 }
 
-int hns_roce_init_uar_table(struct hns_roce_dev *dev);
+void hns_roce_init_uar_table(struct hns_roce_dev *dev);
 int hns_roce_uar_alloc(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
-void hns_roce_uar_free(struct hns_roce_dev *dev, struct hns_roce_uar *uar);
-void hns_roce_cleanup_uar_table(struct hns_roce_dev *dev);
 
 int hns_roce_cmd_init(struct hns_roce_dev *hr_dev);
 void hns_roce_cmd_cleanup(struct hns_roce_dev *hr_dev);
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -325,7 +325,7 @@ static int hns_roce_alloc_ucontext(struc
 	return 0;
 
 error_fail_copy_to_udata:
-	hns_roce_uar_free(hr_dev, &context->uar);
+	ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);
 
 error_fail_uar_alloc:
 	return ret;
@@ -334,8 +334,9 @@ error_fail_uar_alloc:
 static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
 {
 	struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
+	struct hns_roce_dev *hr_dev = to_hr_dev(ibcontext->device);
 
-	hns_roce_uar_free(to_hr_dev(ibcontext->device), &context->uar);
+	ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);
 }
 
 static int hns_roce_mmap(struct ib_ucontext *context,
@@ -737,11 +738,7 @@ static int hns_roce_setup_hca(struct hns
 		mutex_init(&hr_dev->pgdir_mutex);
 	}
 
-	ret = hns_roce_init_uar_table(hr_dev);
-	if (ret) {
-		dev_err(dev, "Failed to initialize uar table. aborting\n");
-		return ret;
-	}
+	hns_roce_init_uar_table(hr_dev);
 
 	ret = hns_roce_uar_alloc(hr_dev, &hr_dev->priv_uar);
 	if (ret) {
@@ -780,10 +777,9 @@ err_qp_table_free:
 		ida_destroy(&hr_dev->xrcd_ida.ida);
 
 	ida_destroy(&hr_dev->pd_ida.ida);
-	hns_roce_uar_free(hr_dev, &hr_dev->priv_uar);
 
 err_uar_table_free:
-	hns_roce_cleanup_uar_table(hr_dev);
+	ida_destroy(&hr_dev->uar_ida.ida);
 	return ret;
 }
 
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -85,13 +85,18 @@ int hns_roce_dealloc_pd(struct ib_pd *pd
 
 int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
 {
+	struct hns_roce_ida *uar_ida = &hr_dev->uar_ida;
 	struct resource *res;
-	int ret;
+	int id;
 
 	/* Using bitmap to manager UAR index */
-	ret = hns_roce_bitmap_alloc(&hr_dev->uar_table.bitmap, &uar->logic_idx);
-	if (ret)
+	id = ida_alloc_range(&uar_ida->ida, uar_ida->min, uar_ida->max,
+			     GFP_KERNEL);
+	if (id < 0) {
+		ibdev_err(&hr_dev->ib_dev, "failed to alloc uar id(%d).\n", id);
 		return -ENOMEM;
+	}
+	uar->logic_idx = (unsigned long)id;
 
 	if (uar->logic_idx > 0 && hr_dev->caps.phy_num_uars > 1)
 		uar->index = (uar->logic_idx - 1) %
@@ -102,6 +107,7 @@ int hns_roce_uar_alloc(struct hns_roce_d
 	if (!dev_is_pci(hr_dev->dev)) {
 		res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
 		if (!res) {
+			ida_free(&uar_ida->ida, id);
 			dev_err(&hr_dev->pdev->dev, "memory resource not found!\n");
 			return -EINVAL;
 		}
@@ -114,22 +120,13 @@ int hns_roce_uar_alloc(struct hns_roce_d
 	return 0;
 }
 
-void hns_roce_uar_free(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
-{
-	hns_roce_bitmap_free(&hr_dev->uar_table.bitmap, uar->logic_idx);
-}
-
-int hns_roce_init_uar_table(struct hns_roce_dev *hr_dev)
+void hns_roce_init_uar_table(struct hns_roce_dev *hr_dev)
 {
-	return hns_roce_bitmap_init(&hr_dev->uar_table.bitmap,
-				    hr_dev->caps.num_uars,
-				    hr_dev->caps.num_uars - 1,
-				    hr_dev->caps.reserved_uars, 0);
-}
+	struct hns_roce_ida *uar_ida = &hr_dev->uar_ida;
 
-void hns_roce_cleanup_uar_table(struct hns_roce_dev *hr_dev)
-{
-	hns_roce_bitmap_cleanup(&hr_dev->uar_table.bitmap);
+	ida_init(&uar_ida->ida);
+	uar_ida->max = hr_dev->caps.num_uars - 1;
+	uar_ida->min = hr_dev->caps.reserved_uars;
 }
 
 static int hns_roce_xrcd_alloc(struct hns_roce_dev *hr_dev, u32 *xrcdn)