From: Weihang Li <liweihang@huawei.com>
Date: Wed, 20 May 2020 21:53:16 +0800
Subject: RDMA/hns: Change all page_shift to unsigned
Patch-mainline: v5.8-rc1
Git-commit: 82d07a4e466fa2e3cc0ac5479beeb739abaa7438
References: jsc#SLE-14777

page_shift is used to calculate the page size; it is always non-negative
and should therefore be of an unsigned type.
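
For illustration only (not part of the patch), a minimal sketch of the
pattern this conversion targets; the helper name below is hypothetical:

  	/*
  	 * Illustration only: page_shift is consumed solely as a left-shift
  	 * count when deriving the page size, so an unsigned type states the
  	 * intent directly and rules out signed-shift corner cases.
  	 */
  	static inline size_t page_size_from_shift(unsigned int page_shift)
  	{
  		return (size_t)1 << page_shift;	/* e.g. 12 -> 4096 bytes */
  	}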

Link: https://lore.kernel.org/r/1589982799-28728-7-git-send-email-liweihang@huawei.com
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/infiniband/hw/hns/hns_roce_alloc.c  |    2 +-
 drivers/infiniband/hw/hns/hns_roce_device.h |   25 +++++++++++++------------
 drivers/infiniband/hw/hns/hns_roce_hem.c    |    2 +-
 drivers/infiniband/hw/hns/hns_roce_hem.h    |    2 +-
 drivers/infiniband/hw/hns/hns_roce_mr.c     |   20 +++++++++++---------
 5 files changed, 27 insertions(+), 24 deletions(-)

--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -254,7 +254,7 @@ int hns_roce_get_kmem_bufs(struct hns_ro
 
 int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
 			   int buf_cnt, int start, struct ib_umem *umem,
-			   int page_shift)
+			   unsigned int page_shift)
 {
 	struct ib_block_iter biter;
 	int total = 0;
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -342,7 +342,7 @@ struct hns_roce_buf_attr {
 		int	hopnum; /* multi-hop addressing hop num */
 	} region[HNS_ROCE_MAX_BT_REGION];
 	int region_count; /* valid region count */
-	int page_shift;  /* buffer page shift */
+	unsigned int page_shift;  /* buffer page shift */
 	bool fixed_page; /* decide page shift is fixed-size or maximum size */
 	int user_access; /* umem access flag */
 	bool mtt_only; /* only alloc buffer-required MTT memory */
@@ -351,14 +351,14 @@ struct hns_roce_buf_attr {
 /* memory translate region */
 struct hns_roce_mtr {
 	struct hns_roce_hem_list hem_list; /* multi-hop addressing resource */
-	struct ib_umem		 *umem; /* user space buffer */
-	struct hns_roce_buf	 *kmem; /* kernel space buffer */
+	struct ib_umem		*umem; /* user space buffer */
+	struct hns_roce_buf	*kmem; /* kernel space buffer */
 	struct {
-		dma_addr_t	 root_ba; /* root BA table's address */
-		bool		 is_direct; /* addressing without BA table */
-		int		 ba_pg_shift; /* BA table page shift */
-		int		 buf_pg_shift; /* buffer page shift */
-		int		 buf_pg_count;  /* buffer page count */
+		dma_addr_t	root_ba; /* root BA table's address */
+		bool		is_direct; /* addressing without BA table */
+		unsigned int	ba_pg_shift; /* BA table page shift */
+		unsigned int	buf_pg_shift; /* buffer page shift */
+		int		buf_pg_count;  /* buffer page count */
 	} hem_cfg; /* config for hardware addressing */
 };
 
@@ -423,7 +423,7 @@ struct hns_roce_buf {
 	struct hns_roce_buf_list	*page_list;
 	u32				npages;
 	u32				size;
-	int				page_shift;
+	unsigned int			page_shift;
 };
 
 struct hns_roce_db_pgdir {
@@ -1139,8 +1139,9 @@ void hns_roce_cmd_use_polling(struct hns
 int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 		      int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr);
 int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
-			struct hns_roce_buf_attr *buf_attr, int page_shift,
-			struct ib_udata *udata, unsigned long user_addr);
+			struct hns_roce_buf_attr *buf_attr,
+			unsigned int page_shift, struct ib_udata *udata,
+			unsigned long user_addr);
 void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev,
 			  struct hns_roce_mtr *mtr);
 int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
@@ -1210,7 +1211,7 @@ int hns_roce_get_kmem_bufs(struct hns_ro
 			   int buf_cnt, int start, struct hns_roce_buf *buf);
 int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
 			   int buf_cnt, int start, struct ib_umem *umem,
-			   int page_shift);
+			   unsigned int page_shift);
 
 int hns_roce_create_srq(struct ib_srq *srq,
 			struct ib_srq_init_attr *srq_init_attr,
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -1400,7 +1400,7 @@ err_exit:
 int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
 			      struct hns_roce_hem_list *hem_list,
 			      const struct hns_roce_buf_region *regions,
-			      int region_cnt, int bt_pg_shift)
+			      int region_cnt, unsigned int bt_pg_shift)
 {
 	const struct hns_roce_buf_region *r;
 	int ofs, end;
--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
@@ -133,7 +133,7 @@ int hns_roce_hem_list_calc_root_ba(const
 int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
 			      struct hns_roce_hem_list *hem_list,
 			      const struct hns_roce_buf_region *regions,
-			      int region_cnt, int bt_pg_shift);
+			      int region_cnt, unsigned int bt_pg_shift);
 void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,
 			       struct hns_roce_hem_list *hem_list);
 void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -706,7 +706,8 @@ static inline size_t mtr_bufs_size(struc
 	return size;
 }
 
-static inline int mtr_umem_page_count(struct ib_umem *umem, int page_shift)
+static inline int mtr_umem_page_count(struct ib_umem *umem,
+				      unsigned int page_shift)
 {
 	int count = ib_umem_page_count(umem);
 
@@ -719,7 +720,7 @@ static inline int mtr_umem_page_count(st
 }
 
 static inline size_t mtr_kmem_direct_size(bool is_direct, size_t alloc_size,
-					  int page_shift)
+					  unsigned int page_shift)
 {
 	if (is_direct)
 		return ALIGN(alloc_size, 1 << page_shift);
@@ -732,7 +733,7 @@ static inline size_t mtr_kmem_direct_siz
  * Returns 0 on success, or the error page num.
  */
 static inline int mtr_check_direct_pages(dma_addr_t *pages, int page_count,
-					 int page_shift)
+					 unsigned int page_shift)
 {
 	size_t page_size = 1 << page_shift;
 	int i;
@@ -765,8 +766,8 @@ static int mtr_alloc_bufs(struct hns_roc
 			  struct ib_udata *udata, unsigned long user_addr)
 {
 	struct ib_device *ibdev = &hr_dev->ib_dev;
-	int max_pg_shift = buf_attr->page_shift;
-	int best_pg_shift = 0;
+	unsigned int max_pg_shift = buf_attr->page_shift;
+	unsigned int best_pg_shift = 0;
 	int all_pg_count = 0;
 	size_t direct_size;
 	size_t total_size;
@@ -836,7 +837,7 @@ err_alloc_mem:
 }
 
 static int mtr_get_pages(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
-			 dma_addr_t *pages, int count, int page_shift)
+			 dma_addr_t *pages, int count, unsigned int page_shift)
 {
 	struct ib_device *ibdev = &hr_dev->ib_dev;
 	int npage;
@@ -946,7 +947,7 @@ done:
 /* convert buffer size to page index and page count */
 static int mtr_init_region(struct hns_roce_buf_attr *attr, int page_cnt,
 			   struct hns_roce_buf_region *regions, int region_cnt,
-			   int page_shift)
+			   unsigned int page_shift)
 {
 	unsigned int page_size = 1 << page_shift;
 	int max_region = attr->region_count;
@@ -977,8 +978,9 @@ static int mtr_init_region(struct hns_ro
  * @buf_alloced: mtr has private buffer, true means need to alloc
  */
 int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
-			struct hns_roce_buf_attr *buf_attr, int page_shift,
-			struct ib_udata *udata, unsigned long user_addr)
+			struct hns_roce_buf_attr *buf_attr,
+			unsigned int page_shift, struct ib_udata *udata,
+			unsigned long user_addr)
 {
 	struct hns_roce_buf_region regions[HNS_ROCE_MAX_BT_REGION] = {};
 	struct ib_device *ibdev = &hr_dev->ib_dev;