linux/debian/patches/features/all/rt/Revert-random-invalidate-batched-entropy-after-crng-init.patch

163 lines
5.2 KiB
Diff

From 8adeebf2a94f4625c39c25ec461d0d2ab623b3ad Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 14 Jun 2017 21:29:16 +0200
Subject: [PATCH] Revert "random: invalidate batched entropy after crng init"
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz
This reverts commit 86f95e53ed76fec2579e00351c6050ab398a7730.
In -RT lockdep complains with
| -> #1 (primary_crng.lock){+.+...}:
| lock_acquire+0xb5/0x2b0
| rt_spin_lock+0x46/0x50
| _extract_crng+0x39/0xa0
| extract_crng+0x3a/0x40
| get_random_u64+0x17a/0x200
| cache_random_seq_create+0x51/0x100
| init_cache_random_seq+0x35/0x90
| __kmem_cache_create+0xd3/0x560
| create_boot_cache+0x8c/0xb2
| create_kmalloc_cache+0x54/0x9f
| create_kmalloc_caches+0xe3/0xfd
| kmem_cache_init+0x14f/0x1f0
| start_kernel+0x1e7/0x3b3
| x86_64_start_reservations+0x2a/0x2c
| x86_64_start_kernel+0x13d/0x14c
| verify_cpu+0x0/0xfc
|
| -> #0 (batched_entropy_reset_lock){+.+...}:
| __lock_acquire+0x11b4/0x1320
| lock_acquire+0xb5/0x2b0
| rt_write_lock+0x26/0x40
| rt_write_lock_irqsave+0x9/0x10
| invalidate_batched_entropy+0x28/0xb0
| crng_fast_load+0xb5/0xe0
| add_interrupt_randomness+0x16c/0x1a0
| irq_thread+0x15c/0x1e0
| kthread+0x112/0x150
| ret_from_fork+0x31/0x40
so revert this for now and check later with upstream.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 drivers/char/random.c | 37 -------------------------------------
 1 file changed, 37 deletions(-)
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1,9 +1,6 @@
 /*
  * random.c -- A strong random number generator
  *
- * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All
- * Rights Reserved.
- *
  * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
  *
  * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All
@@ -765,8 +762,6 @@ static DECLARE_WAIT_QUEUE_HEAD(crng_init
 static struct crng_state **crng_node_pool __read_mostly;
 #endif
 
-static void invalidate_batched_entropy(void);
-
 static void crng_initialize(struct crng_state *crng)
 {
 	int i;
@@ -804,7 +799,6 @@ static int crng_fast_load(const char *cp
 		cp++; crng_init_cnt++; len--;
 	}
 	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
-		invalidate_batched_entropy();
 		crng_init = 1;
 		wake_up_interruptible(&crng_init_wait);
 		pr_notice("random: fast init done\n");
@@ -842,7 +836,6 @@ static void crng_reseed(struct crng_stat
 	memzero_explicit(&buf, sizeof(buf));
 	crng->init_time = jiffies;
 	if (crng == &primary_crng && crng_init < 2) {
-		invalidate_batched_entropy();
 		crng_init = 2;
 		process_random_ready_list();
 		wake_up_interruptible(&crng_init_wait);
@@ -2023,7 +2016,6 @@ struct batched_entropy {
 	};
 	unsigned int position;
 };
-static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
 
 /*
  * Get a random word for internal kernel use only. The quality of the random
@@ -2034,8 +2026,6 @@ static DEFINE_PER_CPU(struct batched_ent
 u64 get_random_u64(void)
 {
 	u64 ret;
-	bool use_lock = crng_init < 2;
-	unsigned long flags;
 	struct batched_entropy *batch;
 
 #if BITS_PER_LONG == 64
@@ -2048,15 +2038,11 @@ u64 get_random_u64(void)
 #endif
 
 	batch = &get_cpu_var(batched_entropy_u64);
-	if (use_lock)
-		read_lock_irqsave(&batched_entropy_reset_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
 		extract_crng((u8 *)batch->entropy_u64);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u64[batch->position++];
-	if (use_lock)
-		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
 	put_cpu_var(batched_entropy_u64);
 	return ret;
 }
@@ -2066,45 +2052,22 @@ static DEFINE_PER_CPU(struct batched_ent
 u32 get_random_u32(void)
 {
 	u32 ret;
-	bool use_lock = crng_init < 2;
-	unsigned long flags;
 	struct batched_entropy *batch;
 
 	if (arch_get_random_int(&ret))
 		return ret;
 
 	batch = &get_cpu_var(batched_entropy_u32);
-	if (use_lock)
-		read_lock_irqsave(&batched_entropy_reset_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
 		extract_crng((u8 *)batch->entropy_u32);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u32[batch->position++];
-	if (use_lock)
-		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
 	put_cpu_var(batched_entropy_u32);
 	return ret;
 }
 EXPORT_SYMBOL(get_random_u32);
 
-/* It's important to invalidate all potential batched entropy that might
- * be stored before the crng is initialized, which we can do lazily by
- * simply resetting the counter to zero so that it's re-extracted on the
- * next usage. */
-static void invalidate_batched_entropy(void)
-{
-	int cpu;
-	unsigned long flags;
-
-	write_lock_irqsave(&batched_entropy_reset_lock, flags);
-	for_each_possible_cpu (cpu) {
-		per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
-		per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
-	}
-	write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
-}
-
 /**
  * randomize_page - Generate a random, page aligned address
  * @start: The smallest acceptable address the caller will take.