From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 12 May 2017 15:46:17 +0200
Subject: random: avoid preempt_disable()ed section
Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git
Git-commit: 4bed11300e24d5178829758e535cc4996490b7c8
Patch-mainline: Queued in subsystem maintainer repository
References: SLE Realtime Extension

On PREEMPT_RT, extract_crng() ends up taking sleeping locks while
running inside the preempt_disable()ed section entered via
get_cpu_var(), which is not allowed. Work around it with local_locks.

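A minimal sketch of the local_lock side of the conversion, modelled on
the RT patch's <linux/locallock.h> (simplified; the exact definitions
vary between RT releases):

	/* On PREEMPT_RT a local_lock is backed by a per-CPU
	 * spinlock_t, itself a sleeping lock on RT, so the section
	 * stays preemptible and extract_crng() may take further
	 * sleeping locks inside it. */
	#define get_locked_var(lvar, var)		\
		(*({					\
			local_lock(lvar);		\
			this_cpu_ptr(&var);		\
		}))

	#define put_locked_var(lvar, var)	local_unlock(lvar)
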
Cc: stable-rt@vger.kernel.org # where it applies to
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Mike Galbraith <mgalbraith@suse.de>
---
 drivers/char/random.c |   11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -263,6 +263,7 @@
 #include <linux/syscalls.h>
 #include <linux/completion.h>
 #include <linux/uuid.h>
+#include <linux/locallock.h>
 #include <crypto/chacha20.h>
 
 #include <asm/processor.h>
@@ -2191,35 +2192,37 @@ struct batched_entropy {
  * point prior.
  */
 static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
+static DEFINE_LOCAL_IRQ_LOCK(batched_entropy_u64_lock);
 u64 get_random_u64(void)
 {
 	u64 ret;
 	struct batched_entropy *batch;
 
-	batch = &get_cpu_var(batched_entropy_u64);
+	batch = &get_locked_var(batched_entropy_u64_lock, batched_entropy_u64);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
 		extract_crng((u8 *)batch->entropy_u64);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u64[batch->position++];
-	put_cpu_var(batched_entropy_u64);
+	put_locked_var(batched_entropy_u64_lock, batched_entropy_u64);
 	return ret;
 }
 EXPORT_SYMBOL(get_random_u64);
 
 static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
+static DEFINE_LOCAL_IRQ_LOCK(batched_entropy_u32_lock);
 u32 get_random_u32(void)
 {
 	u32 ret;
 	struct batched_entropy *batch;
 
-	batch = &get_cpu_var(batched_entropy_u32);
+	batch = &get_locked_var(batched_entropy_u32_lock, batched_entropy_u32);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
 		extract_crng((u8 *)batch->entropy_u32);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u32[batch->position++];
-	put_cpu_var(batched_entropy_u32);
+	put_locked_var(batched_entropy_u32_lock, batched_entropy_u32);
 	return ret;
 }
 EXPORT_SYMBOL(get_random_u32);
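
For reference, the !PREEMPT_RT side of <linux/locallock.h> (again a
simplified sketch, not the verbatim header) maps the new primitives
straight back to the old ones, so non-RT builds keep the previous
preempt_disable() semantics and see no functional change:

	/* Without RT the lock object compiles away; get_cpu_var()
	 * still disables preemption as before. */
	#define get_locked_var(lvar, var)	get_cpu_var(var)
	#define put_locked_var(lvar, var)	put_cpu_var(var)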