
Commit 876736a

Sebastian Andrzej Siewior authored and gregkh committed
random: add a spinlock_t to struct batched_entropy
[ Upstream commit b7d5dc2 ]

The per-CPU variable batched_entropy_uXX is protected by get_cpu_var().
That is just a preempt_disable(), which ensures the variable is only
accessed from the local CPU; it does not protect against users on the
same CPU coming from another context. It is possible that a preemptible
context reads slot 0, an interrupt occurs, and the same value is read
again.

The above scenario is confirmed by lockdep if we add a spinlock:

| ================================
| WARNING: inconsistent lock state
| 5.1.0-rc3+ #42 Not tainted
| --------------------------------
| inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} usage.
| ksoftirqd/9/56 [HC0[0]:SC1[1]:HE0:SE0] takes:
| (____ptrval____) (batched_entropy_u32.lock){+.?.}, at: get_random_u32+0x3e/0xe0
| {SOFTIRQ-ON-W} state was registered at:
|   _raw_spin_lock+0x2a/0x40
|   get_random_u32+0x3e/0xe0
|   new_slab+0x15c/0x7b0
|   ___slab_alloc+0x492/0x620
|   __slab_alloc.isra.73+0x53/0xa0
|   kmem_cache_alloc_node+0xaf/0x2a0
|   copy_process.part.41+0x1e1/0x2370
|   _do_fork+0xdb/0x6d0
|   kernel_thread+0x20/0x30
|   kthreadd+0x1ba/0x220
|   ret_from_fork+0x3a/0x50
…
| other info that might help us debug this:
|  Possible unsafe locking scenario:
|
|        CPU0
|        ----
|   lock(batched_entropy_u32.lock);
|   <Interrupt>
|     lock(batched_entropy_u32.lock);
|
|  *** DEADLOCK ***
|
| stack backtrace:
| Call Trace:
…
|  kmem_cache_alloc_trace+0x20e/0x270
|  ipmi_alloc_recv_msg+0x16/0x40
…
|  __do_softirq+0xec/0x48d
|  run_ksoftirqd+0x37/0x60
|  smpboot_thread_fn+0x191/0x290
|  kthread+0xfe/0x130
|  ret_from_fork+0x3a/0x50

Add a spinlock_t to the batched_entropy data structure and acquire the
lock while accessing it. Acquire the lock with interrupts disabled,
because this function may be used from interrupt context.

Remove the batched_entropy_reset_lock lock. Now that we have a lock for
the data structure, we can access it from a remote CPU.

Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]>
Signed-off-by: Sasha Levin <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
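For readers who want the pattern in isolation, below is a minimal sketch of the approach this commit adopts: a per-CPU batch that carries its own spinlock, taken with spin_lock_irqsave() so the batch cannot be re-entered from interrupt context on the same CPU. The names my_batch, my_batch_refill() and my_batch_next() are hypothetical stand-ins; the real code uses batched_entropy_u64/u32 and extract_crng(), as shown in the diff below. This is a sketch under those assumptions, not the commit's code.

/* Sketch of the per-CPU batch + spinlock pattern used by this commit.
 * Hypothetical names; the spinlock, taken with interrupts disabled,
 * is what provides the exclusion, since the function may also be
 * called from interrupt context.
 */
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>

struct my_batch {
	u64 entropy[8];
	unsigned int position;
	spinlock_t batch_lock;
};

static DEFINE_PER_CPU(struct my_batch, my_batch) = {
	.batch_lock = __SPIN_LOCK_UNLOCKED(my_batch.lock),
};

/* Stand-in for extract_crng() in the real driver. */
static void my_batch_refill(u64 *buf)
{
	memset(buf, 0, 8 * sizeof(u64));
}

static u64 my_batch_next(void)
{
	struct my_batch *batch;
	unsigned long flags;
	u64 ret;

	/* raw_cpu_ptr() is enough here: exclusion comes from the
	 * per-batch spinlock, not from disabling preemption. */
	batch = raw_cpu_ptr(&my_batch);
	spin_lock_irqsave(&batch->batch_lock, flags);
	if (batch->position % ARRAY_SIZE(batch->entropy) == 0) {
		my_batch_refill(batch->entropy);
		batch->position = 0;
	}
	ret = batch->entropy[batch->position++];
	spin_unlock_irqrestore(&batch->batch_lock, flags);
	return ret;
}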
1 parent 44ea43b commit 876736a


drivers/char/random.c

Lines changed: 27 additions & 25 deletions
@@ -2228,8 +2228,8 @@ struct batched_entropy {
 		u32 entropy_u32[CHACHA20_BLOCK_SIZE / sizeof(u32)];
 	};
 	unsigned int position;
+	spinlock_t batch_lock;
 };
-static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
 
 /*
  * Get a random word for internal kernel use only. The quality of the random
@@ -2239,12 +2239,14 @@ static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_
  * wait_for_random_bytes() should be called and return 0 at least once
  * at any point prior.
  */
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
+	.batch_lock	= __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
+};
+
 u64 get_random_u64(void)
 {
 	u64 ret;
-	bool use_lock;
-	unsigned long flags = 0;
+	unsigned long flags;
 	struct batched_entropy *batch;
 	static void *previous;
 
@@ -2259,28 +2261,25 @@ u64 get_random_u64(void)
 
 	warn_unseeded_randomness(&previous);
 
-	use_lock = READ_ONCE(crng_init) < 2;
-	batch = &get_cpu_var(batched_entropy_u64);
-	if (use_lock)
-		read_lock_irqsave(&batched_entropy_reset_lock, flags);
+	batch = raw_cpu_ptr(&batched_entropy_u64);
+	spin_lock_irqsave(&batch->batch_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
 		extract_crng((u8 *)batch->entropy_u64);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u64[batch->position++];
-	if (use_lock)
-		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
-	put_cpu_var(batched_entropy_u64);
+	spin_unlock_irqrestore(&batch->batch_lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(get_random_u64);
 
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
+	.batch_lock	= __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
+};
 u32 get_random_u32(void)
 {
 	u32 ret;
-	bool use_lock;
-	unsigned long flags = 0;
+	unsigned long flags;
 	struct batched_entropy *batch;
 	static void *previous;
 
@@ -2289,18 +2288,14 @@ u32 get_random_u32(void)
 
 	warn_unseeded_randomness(&previous);
 
-	use_lock = READ_ONCE(crng_init) < 2;
-	batch = &get_cpu_var(batched_entropy_u32);
-	if (use_lock)
-		read_lock_irqsave(&batched_entropy_reset_lock, flags);
+	batch = raw_cpu_ptr(&batched_entropy_u32);
+	spin_lock_irqsave(&batch->batch_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
 		extract_crng((u8 *)batch->entropy_u32);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u32[batch->position++];
-	if (use_lock)
-		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
-	put_cpu_var(batched_entropy_u32);
+	spin_unlock_irqrestore(&batch->batch_lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(get_random_u32);
@@ -2314,12 +2309,19 @@ static void invalidate_batched_entropy(void)
 	int cpu;
 	unsigned long flags;
 
-	write_lock_irqsave(&batched_entropy_reset_lock, flags);
 	for_each_possible_cpu (cpu) {
-		per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
-		per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
+		struct batched_entropy *batched_entropy;
+
+		batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
+		spin_lock_irqsave(&batched_entropy->batch_lock, flags);
+		batched_entropy->position = 0;
+		spin_unlock(&batched_entropy->batch_lock);
+
+		batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
+		spin_lock(&batched_entropy->batch_lock);
+		batched_entropy->position = 0;
+		spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
 	}
-	write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
 }
 
 /**
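A note on the invalidate_batched_entropy() hunk above: for each CPU, the u32 batch is locked with spin_lock_irqsave() and released with a plain spin_unlock(), then the u64 batch is taken with a plain spin_lock() and released with spin_unlock_irqrestore(). The visible effect is that interrupts stay disabled across both per-CPU batches and the saved flags are restored exactly once per loop iteration, presumably to avoid toggling the interrupt state twice for every CPU.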
