From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 14 Jun 2017 21:29:16 +0200
Subject: Revert "random: invalidate batched entropy after crng init"
Git-repo: git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git
Git-commit: b0f2b48d4542c390ccc5e0f68b36e1c583ce8772
Patch-mainline: Queued in subsystem maintainer repository
References: SLE Realtime Extension
This reverts commit 86f95e53ed76fec2579e00351c6050ab398a7730.
In -RT lockdep complains with
| -> #1 (primary_crng.lock){+.+...}:
|        lock_acquire+0xb5/0x2b0
|        rt_spin_lock+0x46/0x50
|        _extract_crng+0x39/0xa0
|        extract_crng+0x3a/0x40
|        get_random_u64+0x17a/0x200
|        cache_random_seq_create+0x51/0x100
|        init_cache_random_seq+0x35/0x90
|        __kmem_cache_create+0xd3/0x560
|        create_boot_cache+0x8c/0xb2
|        create_kmalloc_cache+0x54/0x9f
|        create_kmalloc_caches+0xe3/0xfd
|        kmem_cache_init+0x14f/0x1f0
|        start_kernel+0x1e7/0x3b3
|        x86_64_start_reservations+0x2a/0x2c
|        x86_64_start_kernel+0x13d/0x14c
|        verify_cpu+0x0/0xfc
|
| -> #0 (batched_entropy_reset_lock){+.+...}:
|        __lock_acquire+0x11b4/0x1320
|        lock_acquire+0xb5/0x2b0
|        rt_write_lock+0x26/0x40
|        rt_write_lock_irqsave+0x9/0x10
|        invalidate_batched_entropy+0x28/0xb0
|        crng_fast_load+0xb5/0xe0
|        add_interrupt_randomness+0x16c/0x1a0
|        irq_thread+0x15c/0x1e0
|        kthread+0x112/0x150
|        ret_from_fork+0x31/0x40
so revert this for now and check later with upstream.
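
For reference, what the report above records: the #1 chain shows primary_crng.lock
being taken while the batched-entropy reset lock is already held (get_random_u64()
read-locks it and then calls extract_crng()), and the #0 chain then write-locks the
reset lock from the threaded interrupt path, which lockdep reports as closing a
dependency cycle. Purely as an illustration of that class of problem - not a
reproduction of the exact kernel call chains - here is a minimal userspace sketch
of an AB-BA lock-order inversion, using hypothetical stand-in locks and threads:

  /* Illustrative only: a generic AB-BA inversion of the kind a "possible
   * circular locking dependency" report describes.  Lock and thread names
   * are stand-ins, not the kernel's symbols.  Build with: cc -pthread */
  #include <pthread.h>
  #include <stdio.h>

  static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
  static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

  /* Path one takes A then B, like the ordering recorded in chain #1. */
  static void *path_one(void *unused)
  {
          (void)unused;
          pthread_mutex_lock(&lock_a);
          pthread_mutex_lock(&lock_b);
          pthread_mutex_unlock(&lock_b);
          pthread_mutex_unlock(&lock_a);
          return NULL;
  }

  /* Path two takes B then A: the inverse order that closes the cycle. */
  static void *path_two(void *unused)
  {
          (void)unused;
          pthread_mutex_lock(&lock_b);
          pthread_mutex_lock(&lock_a);
          pthread_mutex_unlock(&lock_a);
          pthread_mutex_unlock(&lock_b);
          return NULL;
  }

  int main(void)
  {
          pthread_t t1, t2;

          pthread_create(&t1, NULL, path_one, NULL);
          pthread_create(&t2, NULL, path_two, NULL);
          pthread_join(t1, NULL);
          pthread_join(t2, NULL);
          /* With unlucky timing the threads block on each other forever;
           * lockdep flags the inverse ordering even if the hang never hits. */
          puts("done");
          return 0;
  }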
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Mike Galbraith <mgalbraith@suse.de>
---
 drivers/char/random.c |   37 -------------------------------------
 1 file changed, 37 deletions(-)
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1,9 +1,6 @@
 /*
  * random.c -- A strong random number generator
  *
- * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All
- * Rights Reserved.
- *
  * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
  *
  * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999.  All
@@ -780,8 +777,6 @@ static DECLARE_WAIT_QUEUE_HEAD(crng_init
 static struct crng_state **crng_node_pool __read_mostly;
 #endif
 
-static void invalidate_batched_entropy(void);
-
 static void crng_initialize(struct crng_state *crng)
 {
 	int		i;
@@ -864,7 +859,6 @@ static void crng_finalize_init(struct cr
 		return;
 	}
 
-	invalidate_batched_entropy();
 	numa_crng_init();
 	crng_init = 2;
 	process_random_ready_list();
@@ -904,7 +898,6 @@ static int crng_fast_load(const char *cp
 	}
 	spin_unlock_irqrestore(&primary_crng.lock, flags);
 	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
-		invalidate_batched_entropy();
 		crng_init = 1;
 		wake_up_interruptible(&crng_init_wait);
 		pr_notice("random: fast init done\n");
@@ -2189,7 +2182,6 @@ struct batched_entropy {
 	};
 	unsigned int position;
 };
-static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
 
 /*
  * Get a random word for internal kernel use only. The quality of the random
@@ -2203,20 +2195,14 @@ static DEFINE_PER_CPU(struct batched_ent
 u64 get_random_u64(void)
 {
 	u64 ret;
-	bool use_lock = READ_ONCE(crng_init) < 2;
-	unsigned long flags = 0;
 	struct batched_entropy *batch;
 
 	batch = &get_cpu_var(batched_entropy_u64);
-	if (use_lock)
-		read_lock_irqsave(&batched_entropy_reset_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
 		extract_crng((u8 *)batch->entropy_u64);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u64[batch->position++];
-	if (use_lock)
-		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
 	put_cpu_var(batched_entropy_u64);
 	return ret;
 }
@@ -2226,42 +2212,19 @@ static DEFINE_PER_CPU(struct batched_ent
 u32 get_random_u32(void)
 {
 	u32 ret;
-	bool use_lock = READ_ONCE(crng_init) < 2;
-	unsigned long flags = 0;
 	struct batched_entropy *batch;
 
 	batch = &get_cpu_var(batched_entropy_u32);
-	if (use_lock)
-		read_lock_irqsave(&batched_entropy_reset_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
 		extract_crng((u8 *)batch->entropy_u32);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u32[batch->position++];
-	if (use_lock)
-		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
 	put_cpu_var(batched_entropy_u32);
 	return ret;
 }
 EXPORT_SYMBOL(get_random_u32);
 
-/* It's important to invalidate all potential batched entropy that might
- * be stored before the crng is initialized, which we can do lazily by
- * simply resetting the counter to zero so that it's re-extracted on the
- * next usage. */
-static void invalidate_batched_entropy(void)
-{
-	int cpu;
-	unsigned long flags;
-
-	write_lock_irqsave(&batched_entropy_reset_lock, flags);
-	for_each_possible_cpu (cpu) {
-		per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
-		per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
-	}
-	write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
-}
-
 /**
  * randomize_page - Generate a random, page aligned address
  * @start:	The smallest acceptable address the caller will take.