From: Doug Ledford <dledford@redhat.com>
Date: Fri, 21 Sep 2018 11:30:12 -0400
Subject: RDMA/umem: Minor optimizations
Patch-mainline: v4.20-rc1
Git-commit: 3312d1c6bdee6aa912c099c0ac0662d197c52842
References: bsc#1103992 FATE#326009

Noticed while reviewing commit d4b4dd1b9706 ("RDMA/umem: Do not use
current->tgid to track the mm_struct").  Why would we take a lock,
adjust a protected variable, drop the lock, and *then* check the input
to our protected variable adjustment?  We then have to take the lock
again on the error unwind.  Let's check the input early and skip
taking the lock entirely if the input isn't valid.
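
For reference, the flow before this patch was roughly as follows (a
simplified sketch; the lock_limit check done under mmap_sem is elided):

	npages = ib_umem_num_pages(umem);

	down_write(&mm->mmap_sem);
	mm->pinned_vm += npages;	/* protected variable adjusted */
	up_write(&mm->mmap_sem);
	...
	if (npages == 0 || npages > UINT_MAX) {
		ret = -EINVAL;
		goto vma;	/* re-takes mmap_sem just to undo pinned_vm */
	}

Moving the check in front of the locked section turns the invalid-input
case into a straight goto out with no locking at all.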

It was also noticed that we set mm = current->mm and then never modify
mm, yet we still go back and dereference current->mm a number of times
needlessly.  Be consistent and use the stored reference in mm.
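
On the error unwind, for example, this turns three current->mm
dereferences into plain uses of mm (see the last hunk below):

	down_write(&mm->mmap_sem);
	mm->pinned_vm -= ib_umem_num_pages(umem);
	up_write(&mm->mmap_sem);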

Signed-off-by: Doug Ledford <dledford@redhat.com>
Reviewed-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/infiniband/core/umem.c |   15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -152,6 +152,10 @@ struct ib_umem *ib_umem_get(struct ib_uc
 		umem->hugetlb = 0;
 
 	npages = ib_umem_num_pages(umem);
+	if (npages == 0 || npages > UINT_MAX) {
+		ret = -EINVAL;
+		goto out;
+	}
 
 	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 
@@ -166,11 +170,6 @@ struct ib_umem *ib_umem_get(struct ib_uc
 
 	cur_base = addr & PAGE_MASK;
 
-	if (npages == 0 || npages > UINT_MAX) {
-		ret = -EINVAL;
-		goto vma;
-	}
-
 	ret = sg_alloc_table(&umem->sg_head, npages, GFP_KERNEL);
 	if (ret)
 		goto vma;
@@ -224,9 +223,9 @@ struct ib_umem *ib_umem_get(struct ib_uc
 umem_release:
 	__ib_umem_release(context->device, umem, 0);
 vma:
-	down_write(&current->mm->mmap_sem);
-	current->mm->pinned_vm -= ib_umem_num_pages(umem);
-	up_write(&current->mm->mmap_sem);
+	down_write(&mm->mmap_sem);
+	mm->pinned_vm -= ib_umem_num_pages(umem);
+	up_write(&mm->mmap_sem);
 out:
 	if (vma_list)
 		free_page((unsigned long) vma_list);