From: Jason Gunthorpe <jgg@mellanox.com>
Date: Sun, 16 Sep 2018 20:48:11 +0300
Subject: RDMA/umem: Avoid synchronize_srcu in the ODP MR destruction path
Patch-mainline: v4.20-rc1
Git-commit: 56ac9dd9177ce451ac8176311915b29e8b5f0ac2
References: bsc#1103992 FATE#326009

synchronize_srcu is slow enough that it should be avoided on the syscall
path when user space is destroying MRs. After all the rework we can now
trivially do this by having call_srcu kfree the per_mm.

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 drivers/infiniband/core/umem_odp.c |   10 ++++++++--
 include/rdma/ib_umem_odp.h         |    1 +
 2 files changed, 9 insertions(+), 2 deletions(-)

--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -286,6 +286,11 @@ found:
 	return 0;
 }
 
+static void free_per_mm(struct rcu_head *rcu)
+{
+	kfree(container_of(rcu, struct ib_ucontext_per_mm, rcu));
+}
+
 void put_per_mm(struct ib_umem_odp *umem_odp)
 {
 	struct ib_ucontext_per_mm *per_mm = umem_odp->per_mm;
@@ -313,9 +318,10 @@ void put_per_mm(struct ib_umem_odp *umem
 	per_mm->active = false;
 	up_write(&per_mm->umem_rwsem);
 
-	mmu_notifier_unregister(&per_mm->mn, per_mm->mm);
+	WARN_ON(!RB_EMPTY_ROOT(&per_mm->umem_tree));
+	mmu_notifier_unregister_no_release(&per_mm->mn, per_mm->mm);
 	put_pid(per_mm->tgid);
-	kfree(per_mm);
+	mmu_notifier_call_srcu(&per_mm->rcu, free_per_mm);
 }
 
 struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
--- a/include/rdma/ib_umem_odp.h
+++ b/include/rdma/ib_umem_odp.h
@@ -99,6 +99,7 @@ struct ib_ucontext_per_mm {
 	unsigned int odp_mrs_count;
 
 	struct list_head ucontext_list;
+	struct rcu_head rcu;
 };
 
 int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access);