From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 25 Jan 2017 16:34:27 +0100
Subject: radix-tree: use local locks
Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git
Git-commit: 681d8e4c840d1d7e110811df6d286f61becb09e4
Patch-mainline: Queued in subsystem maintainer repository
References: SLE Realtime Extension

The preload functionality uses per-CPU variables and disables preemption
to ensure that it does not switch CPUs during its usage. This patch uses
local_lock()/local_unlock() instead of preempt_disable()/preempt_enable()
for the same purpose, so the code remains preemptible on -RT.
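
A minimal sketch of the resulting pattern (illustration only, not part
of the diff below; on !PREEMPT_RT_FULL the locallock.h primitives map
back to preempt_disable()/preempt_enable() and get_cpu_var()/
put_cpu_var(), so non-RT behaviour is unchanged):

	static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
	static DEFINE_LOCAL_IRQ_LOCK(radix_tree_preloads_lock);

	/* was: preempt_disable(); */
	local_lock(radix_tree_preloads_lock);
	rtp = this_cpu_ptr(&radix_tree_preloads);
	/* ... consume nodes from this CPU's preload pool ... */
	/* was: preempt_enable(); */
	local_unlock(radix_tree_preloads_lock);

	/* combined lock-and-access of the per-CPU variable: */
	rtp = &get_locked_var(radix_tree_preloads_lock, radix_tree_preloads);
	/* ... */
	put_locked_var(radix_tree_preloads_lock, radix_tree_preloads);

On -RT the local lock is a per-CPU sleeping lock, so the protected
sections stay preemptible while the task still has exclusive access to
this CPU's preload pool.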

Cc: stable-rt@vger.kernel.org
Reported-and-debugged-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Mike Galbraith <mgalbraith@suse.de>
---
 include/linux/idr.h        |    5 +----
 include/linux/radix-tree.h |    7 ++-----
 lib/radix-tree.c           |   32 +++++++++++++++++++++++---------
 3 files changed, 26 insertions(+), 18 deletions(-)

--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -115,10 +115,7 @@ static inline bool idr_is_empty(const st
  * Each idr_preload() should be matched with an invocation of this
  * function.  See idr_preload() for details.
  */
-static inline void idr_preload_end(void)
-{
-	preempt_enable();
-}
+void idr_preload_end(void);
 
 /**
  * idr_find() - Return pointer for given ID.
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -327,6 +327,8 @@ unsigned int radix_tree_gang_lookup_slot
 int radix_tree_preload(gfp_t gfp_mask);
 int radix_tree_maybe_preload(gfp_t gfp_mask);
 int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order);
+void radix_tree_preload_end(void);
+
 void radix_tree_init(void);
 void *radix_tree_tag_set(struct radix_tree_root *,
 			unsigned long index, unsigned int tag);
@@ -346,11 +348,6 @@ unsigned int radix_tree_gang_lookup_tag_
 		unsigned int max_items, unsigned int tag);
 int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag);
 
-static inline void radix_tree_preload_end(void)
-{
-	preempt_enable();
-}
-
 int radix_tree_split_preload(unsigned old_order, unsigned new_order, gfp_t);
 int radix_tree_split(struct radix_tree_root *, unsigned long index,
 			unsigned new_order);
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -38,7 +38,7 @@
 #include <linux/rcupdate.h>
 #include <linux/slab.h>
 #include <linux/string.h>
-
+#include <linux/locallock.h>
 
 /* Number of nodes in fully populated tree of given height */
 static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly;
@@ -87,6 +87,7 @@ struct radix_tree_preload {
 	struct radix_tree_node *nodes;
 };
 static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
+static DEFINE_LOCAL_IRQ_LOCK(radix_tree_preloads_lock);
 
 static inline struct radix_tree_node *entry_to_node(void *ptr)
 {
@@ -405,12 +406,13 @@ radix_tree_node_alloc(gfp_t gfp_mask, st
 		 * succeed in getting a node here (and never reach
 		 * kmem_cache_alloc)
 		 */
-		rtp = this_cpu_ptr(&radix_tree_preloads);
+		rtp = &get_locked_var(radix_tree_preloads_lock, radix_tree_preloads);
 		if (rtp->nr) {
 			ret = rtp->nodes;
 			rtp->nodes = ret->parent;
 			rtp->nr--;
 		}
+		put_locked_var(radix_tree_preloads_lock, radix_tree_preloads);
 		/*
 		 * Update the allocation stack trace as this is more useful
 		 * for debugging.
@@ -476,14 +478,14 @@ static __must_check int __radix_tree_pre
 	 */
 	gfp_mask &= ~__GFP_ACCOUNT;
 
-	preempt_disable();
+	local_lock(radix_tree_preloads_lock);
 	rtp = this_cpu_ptr(&radix_tree_preloads);
 	while (rtp->nr < nr) {
-		preempt_enable();
+		local_unlock(radix_tree_preloads_lock);
 		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
 		if (node == NULL)
 			goto out;
-		preempt_disable();
+		local_lock(radix_tree_preloads_lock);
 		rtp = this_cpu_ptr(&radix_tree_preloads);
 		if (rtp->nr < nr) {
 			node->parent = rtp->nodes;
@@ -525,7 +527,7 @@ int radix_tree_maybe_preload(gfp_t gfp_m
 	if (gfpflags_allow_blocking(gfp_mask))
 		return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
 	/* Preloading doesn't help anything with this gfp mask, skip it */
-	preempt_disable();
+	local_lock(radix_tree_preloads_lock);
 	return 0;
 }
 EXPORT_SYMBOL(radix_tree_maybe_preload);
@@ -563,7 +565,7 @@ int radix_tree_maybe_preload_order(gfp_t
 
 	/* Preloading doesn't help anything with this gfp mask, skip it */
 	if (!gfpflags_allow_blocking(gfp_mask)) {
-		preempt_disable();
+		local_lock(radix_tree_preloads_lock);
 		return 0;
 	}
 
@@ -597,6 +599,12 @@ int radix_tree_maybe_preload_order(gfp_t
 	return __radix_tree_preload(gfp_mask, nr_nodes);
 }
 
+void radix_tree_preload_end(void)
+{
+	local_unlock(radix_tree_preloads_lock);
+}
+EXPORT_SYMBOL(radix_tree_preload_end);
+
 static unsigned radix_tree_load_root(const struct radix_tree_root *root,
 		struct radix_tree_node **nodep, unsigned long *maxindex)
 {
@@ -2102,10 +2110,16 @@ EXPORT_SYMBOL(radix_tree_tagged);
 void idr_preload(gfp_t gfp_mask)
 {
 	if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
-		preempt_disable();
+		local_lock(radix_tree_preloads_lock);
 }
 EXPORT_SYMBOL(idr_preload);
 
+void idr_preload_end(void)
+{
+	local_unlock(radix_tree_preloads_lock);
+}
+EXPORT_SYMBOL(idr_preload_end);
+
 /**
  * ida_pre_get - reserve resources for ida allocation
  * @ida: ida handle
@@ -2122,7 +2136,7 @@ int ida_pre_get(struct ida *ida, gfp_t g
 	 * to return to the ida_pre_get() step.
 	 */
 	if (!__radix_tree_preload(gfp, IDA_PRELOAD_SIZE))
-		preempt_enable();
+		local_unlock(radix_tree_preloads_lock);
 
 	if (!this_cpu_read(ida_bitmap)) {
 		struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp);