
Commit 3a49e3b

amluto authored and zx2c4 committed
random: remove batched entropy locking
We don't need spinlocks to protect batched entropy -- all we need is a
little bit of care. This should fix up the following splat that Jonathan
received with a PROVE_LOCKING=y/PROVE_RAW_LOCK_NESTING=y kernel:

[ 2.500000] [ BUG: Invalid wait context ]
[ 2.500000] 5.17.0-rc1 torvalds#563 Not tainted
[ 2.500000] -----------------------------
[ 2.500000] swapper/1 is trying to lock:
[ 2.500000] c0b0e9cc (batched_entropy_u32.lock){....}-{3:3}, at: invalidate_batched_entropy+0x18/0x4c
[ 2.500000] other info that might help us debug this:
[ 2.500000] context-{2:2}
[ 2.500000] 3 locks held by swapper/1:
[ 2.500000] #0: c0ae86ac (event_mutex){+.+.}-{4:4}, at: event_trace_init+0x4c/0xd8
[ 2.500000] #1: c0ae81b8 (trace_event_sem){+.+.}-{4:4}, at: event_trace_init+0x68/0xd8
[ 2.500000] #2: c19b05cc (&sb->s_type->i_mutex_key#2){+.+.}-{4:4}, at: start_creating+0x40/0xc4
[ 2.500000] stack backtrace:
[ 2.500000] CPU: 0 PID: 1 Comm: swapper Not tainted 5.17.0-rc1 torvalds#563
[ 2.500000] Hardware name: WPCM450 chip
[ 2.500000] [<c00100a8>] (unwind_backtrace) from [<c000db2c>] (show_stack+0x10/0x14)
[ 2.500000] [<c000db2c>] (show_stack) from [<c0054eec>] (__lock_acquire+0x3f0/0x189c)
[ 2.500000] [<c0054eec>] (__lock_acquire) from [<c0054478>] (lock_acquire+0x2b8/0x354)
[ 2.500000] [<c0054478>] (lock_acquire) from [<c0568028>] (_raw_spin_lock_irqsave+0x60/0x74)
[ 2.500000] [<c0568028>] (_raw_spin_lock_irqsave) from [<c030b6f4>] (invalidate_batched_entropy+0x18/0x4c)
[ 2.500000] [<c030b6f4>] (invalidate_batched_entropy) from [<c030e7fc>] (crng_fast_load+0xf0/0x110)
[ 2.500000] [<c030e7fc>] (crng_fast_load) from [<c030e954>] (add_interrupt_randomness+0x138/0x200)
[ 2.500000] [<c030e954>] (add_interrupt_randomness) from [<c0061b34>] (handle_irq_event_percpu+0x18/0x38)
[ 2.500000] [<c0061b34>] (handle_irq_event_percpu) from [<c0061b8c>] (handle_irq_event+0x38/0x5c)
[ 2.500000] [<c0061b8c>] (handle_irq_event) from [<c0065b28>] (handle_fasteoi_irq+0x9c/0x114)
[ 2.500000] [<c0065b28>] (handle_fasteoi_irq) from [<c0061178>] (handle_irq_desc+0x24/0x34)
[ 2.500000] [<c0061178>] (handle_irq_desc) from [<c056214c>] (generic_handle_arch_irq+0x28/0x3c)
[ 2.500000] [<c056214c>] (generic_handle_arch_irq) from [<c0008eb4>] (__irq_svc+0x54/0x80)
[ 2.500000] Exception stack(0xc1485d48 to 0xc1485d90)
[ 2.500000] 5d40: 9780e804 00000001 c09413d4 200000d3 60000053 c016af54
[ 2.500000] 5d60: 00000000 c0afa5b8 c14194e0 c19a1d48 c0789ce0 00000000 c1490480 c1485d98
[ 2.500000] 5d80: c0168970 c0168984 20000053 ffffffff
[ 2.500000] [<c0008eb4>] (__irq_svc) from [<c0168984>] (read_seqbegin.constprop.0+0x6c/0x90)
[ 2.500000] [<c0168984>] (read_seqbegin.constprop.0) from [<c016af54>] (d_lookup+0x14/0x40)
[ 2.500000] [<c016af54>] (d_lookup) from [<c015cecc>] (lookup_dcache+0x18/0x50)
[ 2.500000] [<c015cecc>] (lookup_dcache) from [<c015d868>] (lookup_one_len+0x90/0xe0)
[ 2.500000] [<c015d868>] (lookup_one_len) from [<c01e33e4>] (start_creating+0x68/0xc4)
[ 2.500000] [<c01e33e4>] (start_creating) from [<c01e398c>] (tracefs_create_file+0x30/0x11c)
[ 2.500000] [<c01e398c>] (tracefs_create_file) from [<c00c42f8>] (trace_create_file+0x14/0x38)
[ 2.500000] [<c00c42f8>] (trace_create_file) from [<c00cc854>] (event_create_dir+0x310/0x420)
[ 2.500000] [<c00cc854>] (event_create_dir) from [<c00cc9d8>] (__trace_early_add_event_dirs+0x28/0x50)
[ 2.500000] [<c00cc9d8>] (__trace_early_add_event_dirs) from [<c07c8d64>] (event_trace_init+0x70/0xd8)
[ 2.500000] [<c07c8d64>] (event_trace_init) from [<c07c8560>] (tracer_init_tracefs+0x14/0x284)
[ 2.500000] [<c07c8560>] (tracer_init_tracefs) from [<c000a330>] (do_one_initcall+0xdc/0x288)
[ 2.500000] [<c000a330>] (do_one_initcall) from [<c07bd1e8>] (kernel_init_freeable+0x1c4/0x20c)
[ 2.500000] [<c07bd1e8>] (kernel_init_freeable) from [<c05629c0>] (kernel_init+0x10/0x110)
[ 2.500000] [<c05629c0>] (kernel_init) from [<c00084f8>] (ret_from_fork+0x14/0x3c)
[ 2.500000] Exception stack(0xc1485fb0 to 0xc1485ff8)
[ 2.500000] 5fa0: 00000000 00000000 00000000 00000000
[ 2.500000] 5fc0: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
[ 2.500000] 5fe0: 00000000 00000000 00000000 00000000 00000013 00000000

Signed-off-by: Andy Lutomirski <[email protected]>
[Jason: I extracted this from a larger in-progress series of Andy's that
also unifies the two batches into one and does other performance things.
That's still under development, but because we need this part to fix the
CONFIG_PROVE_RAW_LOCK_NESTING issue, I've extracted it out and applied
it to the current setup. This will also make it easier to backport to
old kernels that also need the fix. I've also amended Andy's original
commit message.]
Reported-by: Jonathan Neuschäfer <[email protected]>
Link: https://lore.kernel.org/lkml/YfMa0QgsjCVdRAvJ@latitude/
Fixes: b7d5dc2 ("random: add a spinlock_t to struct batched_entropy")
Cc: Sebastian Andrzej Siewior <[email protected]>
Cc: [email protected]
Signed-off-by: Jason A. Donenfeld <[email protected]>
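The pattern the commit relies on can be condensed into a small userspace model. This is an editorial sketch, not the kernel code: BATCH_WORDS, refill(), get_batched_u64(), invalidate(), and the dummy fill constant are hypothetical stand-ins for the CHACHA_BLOCK_SIZE-sized batch, extract_crng(), get_random_u64(), and invalidate_batched_entropy(), and plain loads/stores stand in for READ_ONCE()/WRITE_ONCE() done under local_irq_save() on a per-CPU batch.

#include <stdio.h>
#include <stdint.h>

#define BATCH_WORDS 8

struct batched_entropy {
	uint64_t entropy[BATCH_WORDS];
	unsigned int position;	/* next unread slot; == BATCH_WORDS means "empty" */
};

/* Stand-in for extract_crng(): refill the whole batch with fresh words. */
static void refill(struct batched_entropy *batch)
{
	for (unsigned int i = 0; i < BATCH_WORDS; i++)
		batch->entropy[i] = 0x9e3779b97f4a7c15ULL * (i + 1); /* dummy data */
}

/* Models get_random_u64(): no lock, just a cursor. In the kernel,
 * local_irq_save()/this_cpu_ptr() pin the caller to one CPU's batch and
 * READ_ONCE()/WRITE_ONCE() bracket the cursor update. */
static uint64_t get_batched_u64(struct batched_entropy *batch)
{
	unsigned int position = batch->position;	/* READ_ONCE() in the kernel */
	uint64_t ret;

	if (position + 1 > BATCH_WORDS) {	/* empty, or invalidated under us */
		refill(batch);
		position = 0;
	}
	ret = batch->entropy[position++];
	batch->position = position;	/* WRITE_ONCE() in the kernel */
	return ret;
}

/* Models invalidate_batched_entropy(): marking the batch empty is one
 * plain store, so no lock is needed. */
static void invalidate(struct batched_entropy *batch)
{
	batch->position = BATCH_WORDS;	/* WRITE_ONCE() in the kernel */
}

int main(void)
{
	struct batched_entropy batch = { .position = BATCH_WORDS };

	printf("%016llx\n", (unsigned long long)get_batched_u64(&batch)); /* refills, word 0 */
	printf("%016llx\n", (unsigned long long)get_batched_u64(&batch)); /* word 1 */
	invalidate(&batch);	/* next call refills instead of reusing stale words */
	printf("%016llx\n", (unsigned long long)get_batched_u64(&batch)); /* refills, word 0 */
	return 0;
}

The key property the sketch shows is that invalidation is a single store of BATCH_WORDS: a reader that has already loaded an older position may hand out one more value that was batched before the invalidation, which is the case the in-tree NB comment explicitly deems acceptable.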
1 parent 86ece07 commit 3a49e3b

File tree

1 file changed, +32 -25 lines changed


drivers/char/random.c

@@ -2063,7 +2063,6 @@ struct batched_entropy {
 		u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)];
 	};
 	unsigned int position;
-	spinlock_t batch_lock;
 };
 
 /*
@@ -2075,50 +2074,63 @@ struct batched_entropy {
  * point prior.
  */
 static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
-	.batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
+	.position = ARRAY_SIZE(((struct batched_entropy *)0)->entropy_u64)
 };
 
 u64 get_random_u64(void)
 {
 	u64 ret;
 	unsigned long flags;
 	struct batched_entropy *batch;
+	unsigned int position;
 	static void *previous;
 
 	warn_unseeded_randomness(&previous);
 
-	batch = raw_cpu_ptr(&batched_entropy_u64);
-	spin_lock_irqsave(&batch->batch_lock, flags);
-	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
+	local_irq_save(flags);
+	batch = this_cpu_ptr(&batched_entropy_u64);
+	position = READ_ONCE(batch->position);
+	/* NB: position can change to ARRAY_SIZE(batch->entropy_u64) out
+	 * from under us -- see invalidate_batched_entropy(). If this
+	 * happens it's okay if we still return the data in the batch. */
+	if (unlikely(position + 1 > ARRAY_SIZE(batch->entropy_u64))) {
 		extract_crng((u8 *)batch->entropy_u64);
-		batch->position = 0;
+		position = 0;
 	}
-	ret = batch->entropy_u64[batch->position++];
-	spin_unlock_irqrestore(&batch->batch_lock, flags);
+	ret = batch->entropy_u64[position++];
+	WRITE_ONCE(batch->position, position);
+	local_irq_restore(flags);
 	return ret;
 }
 EXPORT_SYMBOL(get_random_u64);
 
 static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
-	.batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
+	.position = ARRAY_SIZE(((struct batched_entropy *)0)->entropy_u32)
 };
+
 u32 get_random_u32(void)
 {
 	u32 ret;
 	unsigned long flags;
 	struct batched_entropy *batch;
+	unsigned int position;
 	static void *previous;
 
 	warn_unseeded_randomness(&previous);
 
-	batch = raw_cpu_ptr(&batched_entropy_u32);
-	spin_lock_irqsave(&batch->batch_lock, flags);
-	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
+	local_irq_save(flags);
+	batch = this_cpu_ptr(&batched_entropy_u32);
+	position = READ_ONCE(batch->position);
+	/* NB: position can change to ARRAY_SIZE(batch->entropy_u32) out
+	 * from under us -- see invalidate_batched_entropy(). If this
+	 * happens it's okay if we still return the data in the batch. */
+	if (unlikely(position + 1 > ARRAY_SIZE(batch->entropy_u32))) {
 		extract_crng((u8 *)batch->entropy_u32);
-		batch->position = 0;
+		position = 0;
 	}
-	ret = batch->entropy_u32[batch->position++];
-	spin_unlock_irqrestore(&batch->batch_lock, flags);
+	ret = batch->entropy_u32[position++];
+	WRITE_ONCE(batch->position, position);
+	local_irq_restore(flags);
 	return ret;
 }
 EXPORT_SYMBOL(get_random_u32);
@@ -2130,20 +2142,15 @@ EXPORT_SYMBOL(get_random_u32);
 static void invalidate_batched_entropy(void)
 {
 	int cpu;
-	unsigned long flags;
 
 	for_each_possible_cpu(cpu) {
-		struct batched_entropy *batched_entropy;
+		struct batched_entropy *batch;
 
-		batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
-		spin_lock_irqsave(&batched_entropy->batch_lock, flags);
-		batched_entropy->position = 0;
-		spin_unlock(&batched_entropy->batch_lock);
+		batch = per_cpu_ptr(&batched_entropy_u32, cpu);
+		WRITE_ONCE(batch->position, ARRAY_SIZE(batch->entropy_u32));
 
-		batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
-		spin_lock(&batched_entropy->batch_lock);
-		batched_entropy->position = 0;
-		spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
+		batch = per_cpu_ptr(&batched_entropy_u64, cpu);
+		WRITE_ONCE(batch->position, ARRAY_SIZE(batch->entropy_u64));
 	}
 }
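Two properties of the lockless scheme are worth making explicit (this is a reading of the diff above, not text from the commit). First, "empty" is now encoded as position == ARRAY_SIZE(...) rather than 0: the readers' refill test changed from batch->position % ARRAY_SIZE(...) == 0 to position + 1 > ARRAY_SIZE(...), so position 0 unambiguously means "full batch, start at slot 0", and the static initializers can mark the boot-time batches as empty without an early refill. Second, invalidate_batched_entropy() is reduced to one WRITE_ONCE() per batch per CPU; if that store races with a reader that has already done its READ_ONCE() of position, the reader can still hand out data that was batched before the invalidation, and its own WRITE_ONCE() of position may then overwrite the invalidation entirely. Returning already-batched data in that window is exactly what the NB comment in the diff declares acceptable.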
