
Commit b7d5dc2

Sebastian Andrzej Siewior authored and Theodore Ts'o (tytso) committed
random: add a spinlock_t to struct batched_entropy
The per-CPU variable batched_entropy_uXX is protected by get_cpu_var().
This is just a preempt_disable(), which ensures that the variable is only
accessed from the local CPU. It does not protect against users on the same
CPU from another context. It is possible that a preemptible context reads
slot 0 and then an interrupt occurs and the same value is read again.

The above scenario is confirmed by lockdep if we add a spinlock:

| ================================
| WARNING: inconsistent lock state
| 5.1.0-rc3+ #42 Not tainted
| --------------------------------
| inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} usage.
| ksoftirqd/9/56 [HC0[0]:SC1[1]:HE0:SE0] takes:
| (____ptrval____) (batched_entropy_u32.lock){+.?.}, at: get_random_u32+0x3e/0xe0
| {SOFTIRQ-ON-W} state was registered at:
|   _raw_spin_lock+0x2a/0x40
|   get_random_u32+0x3e/0xe0
|   new_slab+0x15c/0x7b0
|   ___slab_alloc+0x492/0x620
|   __slab_alloc.isra.73+0x53/0xa0
|   kmem_cache_alloc_node+0xaf/0x2a0
|   copy_process.part.41+0x1e1/0x2370
|   _do_fork+0xdb/0x6d0
|   kernel_thread+0x20/0x30
|   kthreadd+0x1ba/0x220
|   ret_from_fork+0x3a/0x50
…
| other info that might help us debug this:
|  Possible unsafe locking scenario:
|
|        CPU0
|        ----
|   lock(batched_entropy_u32.lock);
|   <Interrupt>
|     lock(batched_entropy_u32.lock);
|
|  *** DEADLOCK ***
|
| stack backtrace:
| Call Trace:
…
|   kmem_cache_alloc_trace+0x20e/0x270
|   ipmi_alloc_recv_msg+0x16/0x40
…
|   __do_softirq+0xec/0x48d
|   run_ksoftirqd+0x37/0x60
|   smpboot_thread_fn+0x191/0x290
|   kthread+0xfe/0x130
|   ret_from_fork+0x3a/0x50

Add a spinlock_t to the batched_entropy data structure and acquire the lock
while accessing it. Acquire the lock with interrupts disabled, because this
function may be used from interrupt context. Remove the
batched_entropy_reset_lock lock. Now that we have a per-batch lock for the
data structure, we can also access it from a remote CPU.

Signed-off-by: Sebastian Andrzej Siewior <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]>
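The window described above is easiest to see in a small standalone sketch. The following is a hypothetical userspace analogy, not code from this patch or from drivers/char/random.c, and every identifier in it is made up for the demo: a SIGUSR1 handler plays the role of the interrupt, and get_batched() mirrors the unlocked pre-patch read-then-bump of ->position. Because nothing keeps the "interrupt" from re-entering between reading the slot index and advancing it, both contexts are handed the same value.

/*
 * Hypothetical userspace analogy (not kernel code): SIGUSR1 stands in for
 * the interrupt, get_batched() for the unlocked pre-patch
 * "ret = batch->entropy[batch->position++]" sequence.
 */
#include <signal.h>
#include <stdio.h>

struct batched_entropy {
        unsigned int entropy[4];
        unsigned int position;
};

static struct batched_entropy batch = {
        .entropy = { 0xaaaa, 0xbbbb, 0xcccc, 0xdddd },
};

static volatile int in_race_window;

static unsigned int get_batched(void)
{
        unsigned int slot = batch.position;     /* read the slot index */

        if (in_race_window) {                   /* force the "interrupt" to land */
                in_race_window = 0;             /* inside the unlocked window    */
                raise(SIGUSR1);
        }

        batch.position = slot + 1;              /* bump only after the read */
        return batch.entropy[slot];
}

static void fake_irq(int sig)
{
        (void)sig;
        /* The "interrupt" re-enters the consumer and sees position == 0 again. */
        printf("irq  context: %#x\n", get_batched());
}

int main(void)
{
        signal(SIGUSR1, fake_irq);

        in_race_window = 1;
        printf("task context: %#x\n", get_batched());   /* same value as the irq   */
        printf("task context: %#x\n", get_batched());   /* batch advances normally */
        return 0;
}

Both the "irq" and the first "task" call print 0xaaaa, i.e. the same batch entry is handed out twice (printf() from the handler is tolerable here only because the signal is raised synchronously). With the locking added by this commit, the equivalent window cannot open: spin_lock_irqsave() keeps local interrupts off across the read-and-increment, and any other context must first take batch_lock.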
1 parent 92e507d commit b7d5dc2

1 file changed: +27 −25 lines

drivers/char/random.c

@@ -2282,8 +2282,8 @@ struct batched_entropy {
                 u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)];
         };
         unsigned int position;
+        spinlock_t batch_lock;
 };
-static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
 
 /*
  * Get a random word for internal kernel use only. The quality of the random
@@ -2293,12 +2293,14 @@ static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_
  * wait_for_random_bytes() should be called and return 0 at least once
  * at any point prior.
  */
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
+        .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
+};
+
 u64 get_random_u64(void)
 {
         u64 ret;
-        bool use_lock;
-        unsigned long flags = 0;
+        unsigned long flags;
         struct batched_entropy *batch;
         static void *previous;
 
@@ -2313,28 +2315,25 @@ u64 get_random_u64(void)
 
         warn_unseeded_randomness(&previous);
 
-        use_lock = READ_ONCE(crng_init) < 2;
-        batch = &get_cpu_var(batched_entropy_u64);
-        if (use_lock)
-                read_lock_irqsave(&batched_entropy_reset_lock, flags);
+        batch = raw_cpu_ptr(&batched_entropy_u64);
+        spin_lock_irqsave(&batch->batch_lock, flags);
         if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
                 extract_crng((u8 *)batch->entropy_u64);
                 batch->position = 0;
         }
         ret = batch->entropy_u64[batch->position++];
-        if (use_lock)
-                read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
-        put_cpu_var(batched_entropy_u64);
+        spin_unlock_irqrestore(&batch->batch_lock, flags);
         return ret;
 }
 EXPORT_SYMBOL(get_random_u64);
 
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
+        .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
+};
 u32 get_random_u32(void)
 {
         u32 ret;
-        bool use_lock;
-        unsigned long flags = 0;
+        unsigned long flags;
         struct batched_entropy *batch;
         static void *previous;
 
@@ -2343,18 +2342,14 @@ u32 get_random_u32(void)
 
         warn_unseeded_randomness(&previous);
 
-        use_lock = READ_ONCE(crng_init) < 2;
-        batch = &get_cpu_var(batched_entropy_u32);
-        if (use_lock)
-                read_lock_irqsave(&batched_entropy_reset_lock, flags);
+        batch = raw_cpu_ptr(&batched_entropy_u32);
+        spin_lock_irqsave(&batch->batch_lock, flags);
         if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
                 extract_crng((u8 *)batch->entropy_u32);
                 batch->position = 0;
         }
         ret = batch->entropy_u32[batch->position++];
-        if (use_lock)
-                read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
-        put_cpu_var(batched_entropy_u32);
+        spin_unlock_irqrestore(&batch->batch_lock, flags);
         return ret;
 }
 EXPORT_SYMBOL(get_random_u32);
@@ -2368,12 +2363,19 @@ static void invalidate_batched_entropy(void)
         int cpu;
         unsigned long flags;
 
-        write_lock_irqsave(&batched_entropy_reset_lock, flags);
         for_each_possible_cpu (cpu) {
-                per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
-                per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
+                struct batched_entropy *batched_entropy;
+
+                batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
+                spin_lock_irqsave(&batched_entropy->batch_lock, flags);
+                batched_entropy->position = 0;
+                spin_unlock(&batched_entropy->batch_lock);
+
+                batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
+                spin_lock(&batched_entropy->batch_lock);
+                batched_entropy->position = 0;
+                spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
         }
-        write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
 }
 
 /**