From: Leon Romanovsky <leonro@mellanox.com>
Date: Tue, 10 Jul 2018 13:31:49 +0300
Subject: RDMA/umem: Refactor exit paths in ib_umem_get
Patch-mainline: v4.19-rc1
Git-commit: 1215cb7c88ec888d599a249142a74dd93b8985ad
References: bsc#1103992 FATE#326009

Simplify exit paths in ib_umem_get to use the standard goto unwind
pattern.

Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/infiniband/core/umem.c |   44 ++++++++++++++++++-----------------------
 1 file changed, 20 insertions(+), 24 deletions(-)

--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -91,7 +91,6 @@ struct ib_umem *ib_umem_get(struct ib_uc
 	int i;
 	unsigned long dma_attrs = 0;
 	struct scatterlist *sg, *sg_list_start;
-	int need_release = 0;
 	unsigned int gup_flags = FOLL_WRITE;
 
 	if (dmasync)
@@ -120,10 +119,8 @@ struct ib_umem *ib_umem_get(struct ib_uc
 
 	if (access & IB_ACCESS_ON_DEMAND) {
 		ret = ib_umem_odp_get(context, umem, access);
-		if (ret) {
-			kfree(umem);
-			return ERR_PTR(ret);
-		}
+		if (ret)
+			goto umem_kfree;
 		return umem;
 	}
 
@@ -134,8 +131,8 @@ struct ib_umem *ib_umem_get(struct ib_uc
 
 	page_list = (struct page **) __get_free_page(GFP_KERNEL);
 	if (!page_list) {
-		kfree(umem);
-		return ERR_PTR(-ENOMEM);
+		ret = -ENOMEM;
+		goto umem_kfree;
 	}
 
 	/*
@@ -155,7 +152,7 @@ struct ib_umem *ib_umem_get(struct ib_uc
 	if ((current->mm->pinned_vm > lock_limit) && !capable(CAP_IPC_LOCK)) {
 		up_write(&current->mm->mmap_sem);
 		ret = -ENOMEM;
-		goto out;
+		goto vma;
 	}
 	up_write(&current->mm->mmap_sem);
 
@@ -163,17 +160,16 @@ struct ib_umem *ib_umem_get(struct ib_uc
 
 	if (npages == 0 || npages > UINT_MAX) {
 		ret = -EINVAL;
-		goto out;
+		goto vma;
 	}
 
 	ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
 	if (ret)
-		goto out;
+		goto vma;
 
 	if (!umem->writable)
 		gup_flags |= FOLL_FORCE;
 
-	need_release = 1;
 	sg_list_start = umem->sg_head.sgl;
 
 	down_read(&current->mm->mmap_sem);
@@ -184,7 +180,7 @@ struct ib_umem *ib_umem_get(struct ib_uc
 				     gup_flags, page_list, vma_list);
 		if (ret < 0) {
 			up_read(&current->mm->mmap_sem);
-			goto out;
+			goto umem_release;
 		}
 
 		umem->npages += ret;
@@ -211,26 +207,26 @@ struct ib_umem *ib_umem_get(struct ib_uc
 
 	if (!umem->nmap) {
 		ret = -ENOMEM;
-		goto out;
+		goto umem_release;
 	}
 
 	ret = 0;
+	goto out;
 
+umem_release:
+	__ib_umem_release(context->device, umem, 0);
+vma:
+	down_write(&current->mm->mmap_sem);
+	current->mm->pinned_vm -= ib_umem_num_pages(umem);
+	up_write(&current->mm->mmap_sem);
 out:
-	if (ret < 0) {
-		down_write(&current->mm->mmap_sem);
-		current->mm->pinned_vm -= ib_umem_num_pages(umem);
-		up_write(&current->mm->mmap_sem);
-		if (need_release)
-			__ib_umem_release(context->device, umem, 0);
-		kfree(umem);
-	}
-
 	if (vma_list)
 		free_page((unsigned long) vma_list);
 	free_page((unsigned long) page_list);
-
-	return ret < 0 ? ERR_PTR(ret) : umem;
+umem_kfree:
+	if (ret)
+		kfree(umem);
+	return ret ? ERR_PTR(ret) : umem;
 }
 EXPORT_SYMBOL(ib_umem_get);