Skip to content

Commit

Permalink
random: do not throw away excess input to crng_fast_load
Browse files Browse the repository at this point in the history
When crng_fast_load() is called by add_hwgenerator_randomness(), we
currently will advance to crng_init==1 once we've acquired 64 bytes, and
then throw away the rest of the buffer. Usually, that is not a problem:
When add_hwgenerator_randomness() gets called via EFI or DT during
setup_arch(), there won't be any IRQ randomness. Therefore, the 64 bytes
passed by EFI exactly match what is needed to advance to crng_init==1.
Usually, DT seems to pass 64 bytes as well -- with one notable exception
being kexec, which hands over 128 bytes of entropy to the kexec'd kernel.
In that case, we'll advance to crng_init==1 once 64 of those bytes are
consumed by crng_fast_load(), but won't continue onward feeding in bytes
to progress to crng_init==2. This commit fixes the issue by having
add_hwgenerator_randomness() feed any bytes left over by crng_fast_load()
into the next phase (crediting them to the input pool).

[linux@dominikbrodowski.net: rewrite commit message]
Signed-off-by: Dominik Brodowski <linux@dominikbrodowski.net>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
  • Loading branch information
Jason A. Donenfeld authored and Jason A. Donenfeld committed Jan 6, 2022
1 parent 9c3ddde commit 73c7733
Showing 1 changed file with 12 additions and 7 deletions.
19 changes: 12 additions & 7 deletions drivers/char/random.c
Original file line number Diff line number Diff line change
Expand Up @@ -919,12 +919,14 @@ static struct crng_state *select_crng(void)

/*
* crng_fast_load() can be called by code in the interrupt service
* path. So we can't afford to dilly-dally.
* path. So we can't afford to dilly-dally. Returns the number of
* bytes processed from cp.
*/
static int crng_fast_load(const char *cp, size_t len)
static size_t crng_fast_load(const char *cp, size_t len)
{
unsigned long flags;
char *p;
size_t ret = 0;

if (!spin_trylock_irqsave(&primary_crng.lock, flags))
return 0;
Expand All @@ -935,15 +937,15 @@ static int crng_fast_load(const char *cp, size_t len)
p = (unsigned char *) &primary_crng.state[4];
while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp;
cp++; crng_init_cnt++; len--;
cp++; crng_init_cnt++; len--; ret++;
}
spin_unlock_irqrestore(&primary_crng.lock, flags);
if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
invalidate_batched_entropy();
crng_init = 1;
pr_notice("fast init done\n");
}
return 1;
return ret;
}

/*
Expand Down Expand Up @@ -1294,7 +1296,7 @@ void add_interrupt_randomness(int irq)
if (unlikely(crng_init == 0)) {
if ((fast_pool->count >= 64) &&
crng_fast_load((char *) fast_pool->pool,
sizeof(fast_pool->pool))) {
sizeof(fast_pool->pool)) > 0) {
fast_pool->count = 0;
fast_pool->last = now;
}
Expand Down Expand Up @@ -2295,8 +2297,11 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
struct entropy_store *poolp = &input_pool;

if (unlikely(crng_init == 0)) {
crng_fast_load(buffer, count);
return;
size_t ret = crng_fast_load(buffer, count);
count -= ret;
buffer += ret;
if (!count || crng_init == 0)
return;
}

/* Suspend writing if we're above the trickle threshold.
Expand Down

0 comments on commit 73c7733

Please sign in to comment.