From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 14 Jun 2017 21:29:16 +0200
Subject: Revert "random: invalidate batched entropy after crng init"
Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git
Git-commit: b0f2b48d4542c390ccc5e0f68b36e1c583ce8772
Patch-mainline: Queued in subsystem maintainer repository
References: SLE Realtime Extension

This reverts commit 86f95e53ed76fec2579e00351c6050ab398a7730.

In -RT, lockdep complains with the following circular locking dependency:
| -> #1 (primary_crng.lock){+.+...}:
|        lock_acquire+0xb5/0x2b0
|        rt_spin_lock+0x46/0x50
|        _extract_crng+0x39/0xa0
|        extract_crng+0x3a/0x40
|        get_random_u64+0x17a/0x200
|        cache_random_seq_create+0x51/0x100
|        init_cache_random_seq+0x35/0x90
|        __kmem_cache_create+0xd3/0x560
|        create_boot_cache+0x8c/0xb2
|        create_kmalloc_cache+0x54/0x9f
|        create_kmalloc_caches+0xe3/0xfd
|        kmem_cache_init+0x14f/0x1f0
|        start_kernel+0x1e7/0x3b3
|        x86_64_start_reservations+0x2a/0x2c
|        x86_64_start_kernel+0x13d/0x14c
|        verify_cpu+0x0/0xfc
|
| -> #0 (batched_entropy_reset_lock){+.+...}:
|        __lock_acquire+0x11b4/0x1320
|        lock_acquire+0xb5/0x2b0
|        rt_write_lock+0x26/0x40
|        rt_write_lock_irqsave+0x9/0x10
|        invalidate_batched_entropy+0x28/0xb0
|        crng_fast_load+0xb5/0xe0
|        add_interrupt_randomness+0x16c/0x1a0
|        irq_thread+0x15c/0x1e0
|        kthread+0x112/0x150
|        ret_from_fork+0x31/0x40
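
The report boils down to an AB-BA ordering between
batched_entropy_reset_lock and primary_crng.lock. As a rough
userspace sketch of the two chains (pthread mutexes stand in for the
-RT sleeping locks; the function bodies are illustrative only, not
the kernel code):

  #include <pthread.h>

  static pthread_mutex_t primary_crng_lock = PTHREAD_MUTEX_INITIALIZER;
  static pthread_mutex_t batched_entropy_reset_lock = PTHREAD_MUTEX_INITIALIZER;

  /* Chain #1: get_random_u64() holds the reset lock (reader side)
   * while _extract_crng() takes primary_crng.lock underneath it. */
  static void chain1_get_random_u64(void)
  {
      pthread_mutex_lock(&batched_entropy_reset_lock);
      pthread_mutex_lock(&primary_crng_lock);     /* _extract_crng() */
      pthread_mutex_unlock(&primary_crng_lock);
      pthread_mutex_unlock(&batched_entropy_reset_lock);
  }

  /* Chain #0: per the dependency lockdep recorded, the
   * crng_fast_load() path reaches invalidate_batched_entropy() and
   * takes the reset lock (writer side) with primary_crng.lock
   * already in the picture -- the opposite order. */
  static void chain0_crng_fast_load(void)
  {
      pthread_mutex_lock(&primary_crng_lock);
      pthread_mutex_lock(&batched_entropy_reset_lock); /* invalidate_batched_entropy() */
      pthread_mutex_unlock(&batched_entropy_reset_lock);
      pthread_mutex_unlock(&primary_crng_lock);
  }

  int main(void)
  {
      /* Run sequentially this is fine; two contexts running the two
       * chains concurrently can each hold one lock while waiting
       * for the other -- the deadlock lockdep is warning about. */
      chain1_get_random_u64();
      chain0_crng_fast_load();
      return 0;
  }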

So revert this for now and check back with upstream later.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Mike Galbraith <mgalbraith@suse.de>
---
 drivers/char/random.c |   37 -------------------------------------
 1 file changed, 37 deletions(-)
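
For reference, a userspace model (illustrative only, simplified from
the resulting code) of the per-CPU batching that remains after the
revert, together with the lazy invalidation this patch removes:

  #include <stdint.h>
  #include <stdio.h>
  #include <stdlib.h>

  /* One batch per CPU in the kernel; a single instance here. */
  struct batched_entropy {
      uint64_t entropy_u64[8];
      unsigned int position;
  };

  static struct batched_entropy batch;

  /* Stand-in for extract_crng(); rand() is a placeholder and not
   * cryptographically strong. */
  static void extract_crng_model(uint8_t *buf, size_t len)
  {
      for (size_t i = 0; i < len; i++)
          buf[i] = (uint8_t)rand();
  }

  static uint64_t get_random_u64_model(void)
  {
      /* Refill the batch once it is exhausted (position wrapped). */
      if (batch.position % 8 == 0) {
          extract_crng_model((uint8_t *)batch.entropy_u64,
                             sizeof(batch.entropy_u64));
          batch.position = 0;
      }
      return batch.entropy_u64[batch.position++];
  }

  /* What the reverted invalidate_batched_entropy() did: reset the
   * position on every CPU so that batch contents produced before the
   * crng was initialized are thrown away and re-extracted on next
   * use.  Serializing that reset against the per-CPU readers is what
   * batched_entropy_reset_lock was for. */
  static void invalidate_batched_entropy_model(void)
  {
      batch.position = 0;
  }

  int main(void)
  {
      printf("%016llx\n", (unsigned long long)get_random_u64_model());
      invalidate_batched_entropy_model();
      /* position was reset, so this call re-extracts first. */
      printf("%016llx\n", (unsigned long long)get_random_u64_model());
      return 0;
  }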

--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1,9 +1,6 @@
 /*
  * random.c -- A strong random number generator
  *
- * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All
- * Rights Reserved.
- *
  * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
  *
  * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999.  All
@@ -780,8 +777,6 @@ static DECLARE_WAIT_QUEUE_HEAD(crng_init
 static struct crng_state **crng_node_pool __read_mostly;
 #endif
 
-static void invalidate_batched_entropy(void);
-
 static void crng_initialize(struct crng_state *crng)
 {
 	int		i;
@@ -864,7 +859,6 @@ static void crng_finalize_init(struct cr
 		return;
 	}
 
-	invalidate_batched_entropy();
 	numa_crng_init();
 	crng_init = 2;
 	process_random_ready_list();
@@ -904,7 +898,6 @@ static int crng_fast_load(const char *cp
 	}
 	spin_unlock_irqrestore(&primary_crng.lock, flags);
 	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
-		invalidate_batched_entropy();
 		crng_init = 1;
 		wake_up_interruptible(&crng_init_wait);
 		pr_notice("random: fast init done\n");
@@ -2189,7 +2182,6 @@ struct batched_entropy {
 	};
 	unsigned int position;
 };
-static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
 
 /*
  * Get a random word for internal kernel use only. The quality of the random
@@ -2203,20 +2195,14 @@ static DEFINE_PER_CPU(struct batched_ent
 u64 get_random_u64(void)
 {
 	u64 ret;
-	bool use_lock = READ_ONCE(crng_init) < 2;
-	unsigned long flags = 0;
 	struct batched_entropy *batch;
 
 	batch = &get_cpu_var(batched_entropy_u64);
-	if (use_lock)
-		read_lock_irqsave(&batched_entropy_reset_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
 		extract_crng((u8 *)batch->entropy_u64);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u64[batch->position++];
-	if (use_lock)
-		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
 	put_cpu_var(batched_entropy_u64);
 	return ret;
 }
@@ -2226,42 +2212,19 @@ static DEFINE_PER_CPU(struct batched_ent
 u32 get_random_u32(void)
 {
 	u32 ret;
-	bool use_lock = READ_ONCE(crng_init) < 2;
-	unsigned long flags = 0;
 	struct batched_entropy *batch;
 
 	batch = &get_cpu_var(batched_entropy_u32);
-	if (use_lock)
-		read_lock_irqsave(&batched_entropy_reset_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
 		extract_crng((u8 *)batch->entropy_u32);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u32[batch->position++];
-	if (use_lock)
-		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
 	put_cpu_var(batched_entropy_u32);
 	return ret;
 }
 EXPORT_SYMBOL(get_random_u32);
 
-/* It's important to invalidate all potential batched entropy that might
- * be stored before the crng is initialized, which we can do lazily by
- * simply resetting the counter to zero so that it's re-extracted on the
- * next usage. */
-static void invalidate_batched_entropy(void)
-{
-	int cpu;
-	unsigned long flags;
-
-	write_lock_irqsave(&batched_entropy_reset_lock, flags);
-	for_each_possible_cpu (cpu) {
-		per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
-		per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
-	}
-	write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
-}
-
 /**
  * randomize_page - Generate a random, page aligned address
  * @start:	The smallest acceptable address the caller will take.