From: YueHaibing <yuehaibing@huawei.com>
Date: Sun, 3 Jun 2018 17:32:22 +0800
Subject: IB/hns: Use zeroing memory allocator instead of allocator/memset
Patch-mainline: v4.18-rc1
Git-commit: 8c61b24585c44e1de337e45858129abce9c3a008
References: bsc#1104427 FATE#326416

Use dma_zalloc_coherent() to allocate zeroed memory and drop the
now-unnecessary memset() calls.
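
As a generic sketch of the pattern (dev, size, and handle are
placeholder names, not this driver's variables; both helpers are
declared in <linux/dma-mapping.h>):

	/* before: allocate, then zero the buffer by hand */
	buf = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
	if (buf)
		memset(buf, 0, size);

	/* after: the allocation already returns zeroed memory */
	buf = dma_zalloc_coherent(dev, size, &handle, GFP_KERNEL);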

Signed-off-by: YueHaibing <yuehaibing@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/infiniband/hw/hns/hns_roce_alloc.c |    8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -197,7 +197,8 @@ int hns_roce_buf_alloc(struct hns_roce_d
 		buf->npages = 1 << order;
 		buf->page_shift = page_shift;
 		/* MTT PA must be recorded in 4k alignment, t is 4k aligned */
-		buf->direct.buf = dma_alloc_coherent(dev, size, &t, GFP_KERNEL);
+		buf->direct.buf = dma_zalloc_coherent(dev,
+						      size, &t, GFP_KERNEL);
 		if (!buf->direct.buf)
 			return -ENOMEM;
 
@@ -207,8 +208,6 @@ int hns_roce_buf_alloc(struct hns_roce_d
 			--buf->page_shift;
 			buf->npages *= 2;
 		}
-
-		memset(buf->direct.buf, 0, size);
 	} else {
 		buf->nbufs = (size + page_size - 1) / page_size;
 		buf->npages = buf->nbufs;
@@ -220,7 +219,7 @@ int hns_roce_buf_alloc(struct hns_roce_d
 			return -ENOMEM;
 
 		for (i = 0; i < buf->nbufs; ++i) {
-			buf->page_list[i].buf = dma_alloc_coherent(dev,
+			buf->page_list[i].buf = dma_zalloc_coherent(dev,
 								  page_size, &t,
 								  GFP_KERNEL);
 
@@ -228,7 +227,6 @@ int hns_roce_buf_alloc(struct hns_roce_d
 				goto err_free;
 
 			buf->page_list[i].map = t;
-			memset(buf->page_list[i].buf, 0, page_size);
 		}
 	}