From: Marc Zyngier <marc.zyngier@arm.com>
Date: Tue, 1 Aug 2017 09:02:55 +0100
Subject: jump_label: Split out code under the hotplug lock
Patch-mainline: v4.14-rc1
Git-commit: 8b7b412807053ab5f059ffae426a280e769a5bda
References: bsc#1109837

In order to later introduce an "already locked" version of some
of the static key functions, let's split the code into the core stuff
(the *_cpuslocked functions) and the usual helpers, which now
take/release the hotplug lock and call into the *_cpuslocked
versions.
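
That is, each such operation ends up with the following shape (a
minimal sketch around a made-up static_key_frob(); the real
functions are in the diff below):

	/* Core part: the caller already holds the CPU hotplug read lock. */
	static void static_key_frob_cpuslocked(struct static_key *key)
	{
		/* ... do the actual key update ... */
	}

	/* Helper: take/release the hotplug lock around the core part. */
	void static_key_frob(struct static_key *key)
	{
		cpus_read_lock();
		static_key_frob_cpuslocked(key);
		cpus_read_unlock();
	}

This keeps the hotplug-lock vs jump_label_lock() ordering in one
place and lets a later patch call the *_cpuslocked variants from
paths that already hold the hotplug lock.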

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arm-kernel@lists.infradead.org
Link: http://lkml.kernel.org/r/20170801080257.5056-3-marc.zyngier@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
---
 kernel/jump_label.c |   28 +++++++++++++++++++---------
 1 file changed, 19 insertions(+), 9 deletions(-)

--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -79,11 +79,10 @@ int static_key_count(struct static_key *
 }
 EXPORT_SYMBOL_GPL(static_key_count);
 
-void static_key_slow_inc(struct static_key *key)
+static void static_key_slow_inc_cpuslocked(struct static_key *key)
 {
 	int v, v1;
 
-	cpus_read_lock();
 	STATIC_KEY_CHECK_USE();
 
 	/*
@@ -100,10 +99,8 @@ void static_key_slow_inc(struct static_k
 	 */
 	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
 		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
-		if (likely(v1 == v)) {
-			cpus_read_unlock();
+		if (likely(v1 == v))
 			return;
-		}
 	}
 
 	jump_label_lock();
@@ -115,6 +112,12 @@ void static_key_slow_inc(struct static_k
 		atomic_inc(&key->enabled);
 	}
 	jump_label_unlock();
+}
+
+void static_key_slow_inc(struct static_key *key)
+{
+	cpus_read_lock();
+	static_key_slow_inc_cpuslocked(key);
 	cpus_read_unlock();
 }
 EXPORT_SYMBOL_GPL(static_key_slow_inc);
@@ -156,10 +159,10 @@ void static_key_disable(struct static_ke
 }
 EXPORT_SYMBOL_GPL(static_key_disable);
 
-static void __static_key_slow_dec(struct static_key *key,
-		unsigned long rate_limit, struct delayed_work *work)
+static void static_key_slow_dec_cpuslocked(struct static_key *key,
+					   unsigned long rate_limit,
+					   struct delayed_work *work)
 {
-	cpus_read_lock();
 	/*
 	 * The negative count check is valid even when a negative
 	 * key->enabled is in use by static_key_slow_inc(); a
@@ -170,7 +173,6 @@ static void __static_key_slow_dec(struct
 	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
 		WARN(atomic_read(&key->enabled) < 0,
 		     "jump label: negative count!\n");
-		cpus_read_unlock();
 		return;
 	}
 
@@ -181,6 +183,14 @@ static void __static_key_slow_dec(struct
 		jump_label_update(key);
 	}
 	jump_label_unlock();
+}
+
+static void __static_key_slow_dec(struct static_key *key,
+				  unsigned long rate_limit,
+				  struct delayed_work *work)
+{
+	cpus_read_lock();
+	static_key_slow_dec_cpuslocked(key, rate_limit, work);
 	cpus_read_unlock();
 }
 