From: Jason Gunthorpe <jgg@mellanox.com>
Date: Tue, 11 Jun 2019 13:09:51 -0300
Subject: RDMA/odp: Fix missed unlock in non-blocking invalidate_start
Patch-mainline: v5.3-rc1
Git-commit: 7608bf40cf2480057ec0da31456cc428791c32ef
References: bsc#1103992 FATE#326009

If invalidate_start() returns -EAGAIN, the umem_rwsem needs to be
unlocked, as no matching invalidate_end() will be called.
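
The invariant being restored can be illustrated with a rough userspace
analogue (a minimal sketch using a pthread rwlock in place of the kernel
rwsem; the function names are illustrative, not the driver's own code):

  #include <errno.h>
  #include <pthread.h>

  static pthread_rwlock_t umem_rwsem = PTHREAD_RWLOCK_INITIALIZER;

  /* invalidate_end() only runs after a successful invalidate_start(),
   * so a failing start must drop the read lock itself. */
  static int invalidate_start(int blockable)
  {
  	int rc;

  	pthread_rwlock_rdlock(&umem_rwsem);

  	/* Stand-in for the range walk; non-blocking callers may see -EAGAIN. */
  	rc = blockable ? 0 : -EAGAIN;
  	if (rc)
  		pthread_rwlock_unlock(&umem_rwsem); /* no invalidate_end() will follow */
  	return rc;
  }

  static void invalidate_end(void)
  {
  	pthread_rwlock_unlock(&umem_rwsem); /* pairs with a successful start */
  }

  int main(void)
  {
  	if (invalidate_start(0) == 0)	/* fails with -EAGAIN, lock already released */
  		invalidate_end();
  	if (invalidate_start(1) == 0)	/* succeeds; end drops the lock */
  		invalidate_end();
  	return 0;
  }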

Cc: <stable@vger.kernel.org>
Fixes: ca748c39ea3f ("RDMA/umem: Get rid of per_mm->notifier_count")
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Reviewed-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/infiniband/core/umem_odp.c |    9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -145,6 +145,7 @@ static void ib_umem_notifier_invalidate_
 {
 	struct ib_ucontext_per_mm *per_mm =
 		container_of(mn, struct ib_ucontext_per_mm, mn);
+	int rc;
 
 	down_read(&per_mm->umem_rwsem);
 	if (!per_mm->active) {
@@ -157,8 +158,12 @@ static void ib_umem_notifier_invalidate_
 		return;
 	}
 
-	rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, start, end,
-				      invalidate_range_start_trampoline, NULL);
+	rc = rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, start, end,
+					   invalidate_range_start_trampoline,
+					   NULL);
+	if (rc)
+		up_read(&per_mm->umem_rwsem);
+	return;
 }
 
 static int invalidate_range_end_trampoline(struct ib_umem_odp *item, u64 start,