Skip to content

Commit

Permalink
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/crng/random.git
Browse files Browse the repository at this point in the history
  • Loading branch information
Mark Brown committed Sep 27, 2022
2 parents a676c00 + 2c2f95d commit 6aa1989
Show file tree
Hide file tree
Showing 7 changed files with 75 additions and 54 deletions.
4 changes: 2 additions & 2 deletions drivers/char/mem.c
Original file line number Diff line number Diff line change
Expand Up @@ -712,8 +712,8 @@ static const struct memdev {
#endif
[5] = { "zero", 0666, &zero_fops, FMODE_NOWAIT },
[7] = { "full", 0666, &full_fops, 0 },
[8] = { "random", 0666, &random_fops, 0 },
[9] = { "urandom", 0666, &urandom_fops, 0 },
[8] = { "random", 0666, &random_fops, FMODE_NOWAIT },
[9] = { "urandom", 0666, &urandom_fops, FMODE_NOWAIT },
#ifdef CONFIG_PRINTK
[11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
Expand Down
99 changes: 58 additions & 41 deletions drivers/char/random.c
Original file line number Diff line number Diff line change
Expand Up @@ -260,25 +260,23 @@ static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
}

/*
* Return whether the crng seed is considered to be sufficiently old
* that a reseeding is needed. This happens if the last reseeding
* was CRNG_RESEED_INTERVAL ago, or during early boot, at an interval
* Return the interval until the next reseeding, which is normally
* CRNG_RESEED_INTERVAL, but during early boot, it is at an interval
* proportional to the uptime.
*/
static bool crng_has_old_seed(void)
static unsigned int crng_reseed_interval(void)
{
static bool early_boot = true;
unsigned long interval = CRNG_RESEED_INTERVAL;

if (unlikely(READ_ONCE(early_boot))) {
time64_t uptime = ktime_get_seconds();
if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
WRITE_ONCE(early_boot, false);
else
interval = max_t(unsigned int, CRNG_RESEED_START_INTERVAL,
(unsigned int)uptime / 2 * HZ);
return max_t(unsigned int, CRNG_RESEED_START_INTERVAL,
(unsigned int)uptime / 2 * HZ);
}
return time_is_before_jiffies(READ_ONCE(base_crng.birth) + interval);
return CRNG_RESEED_INTERVAL;
}

/*
Expand Down Expand Up @@ -320,7 +318,7 @@ static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
* If the base_crng is old enough, we reseed, which in turn bumps the
* generation counter that we check below.
*/
if (unlikely(crng_has_old_seed()))
if (unlikely(time_is_before_jiffies(READ_ONCE(base_crng.birth) + crng_reseed_interval())))
crng_reseed();

local_lock_irqsave(&crngs.lock, flags);
Expand Down Expand Up @@ -774,18 +772,13 @@ static int random_pm_notification(struct notifier_block *nb, unsigned long actio
static struct notifier_block pm_notifier = { .notifier_call = random_pm_notification };

/*
* The first collection of entropy occurs at system boot while interrupts
* are still turned off. Here we push in latent entropy, RDSEED, a timestamp,
* utsname(), and the command line. Depending on the above configuration knob,
* RDSEED may be considered sufficient for initialization. Note that much
* earlier setup may already have pushed entropy into the input pool by the
* time we get here.
* This is called extremely early, before time keeping functionality is
* available, but arch randomness is. Interrupts are not yet enabled.
*/
int __init random_init(const char *command_line)
void __init random_init_early(const char *command_line)
{
ktime_t now = ktime_get_real();
size_t i, longs, arch_bits;
unsigned long entropy[BLAKE2S_BLOCK_SIZE / sizeof(long)];
size_t i, longs, arch_bits;

#if defined(LATENT_ENTROPY_PLUGIN)
static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy;
Expand All @@ -805,34 +798,49 @@ int __init random_init(const char *command_line)
i += longs;
continue;
}
entropy[0] = random_get_entropy();
_mix_pool_bytes(entropy, sizeof(*entropy));
arch_bits -= sizeof(*entropy) * 8;
++i;
}
_mix_pool_bytes(&now, sizeof(now));
_mix_pool_bytes(utsname(), sizeof(*(utsname())));

_mix_pool_bytes(init_utsname(), sizeof(*(init_utsname())));
_mix_pool_bytes(command_line, strlen(command_line));

/* Reseed if already seeded by earlier phases. */
if (crng_ready())
crng_reseed();
else if (trust_cpu)
_credit_init_bits(arch_bits);
}

/*
* This is called a little bit after the prior function, and now there is
* access to timestamps counters. Interrupts are not yet enabled.
*/
void __init random_init(void)
{
unsigned long entropy = random_get_entropy();
ktime_t now = ktime_get_real();

_mix_pool_bytes(&now, sizeof(now));
_mix_pool_bytes(&entropy, sizeof(entropy));
add_latent_entropy();

/*
* If we were initialized by the bootloader before jump labels are
* initialized, then we should enable the static branch here, where
* If we were initialized by the cpu or bootloader before jump labels
* are initialized, then we should enable the static branch here, where
* it's guaranteed that jump labels have been initialized.
*/
if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
crng_set_ready(NULL);

/* Reseed if already seeded by earlier phases. */
if (crng_ready())
crng_reseed();
else if (trust_cpu)
_credit_init_bits(arch_bits);

WARN_ON(register_pm_notifier(&pm_notifier));

WARN(!random_get_entropy(), "Missing cycle counter and fallback timer; RNG "
"entropy collection will consequently suffer.");
return 0;
WARN(!entropy, "Missing cycle counter and fallback timer; RNG "
"entropy collection will consequently suffer.");
}

/*
Expand Down Expand Up @@ -866,11 +874,11 @@ void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy)
credit_init_bits(entropy);

/*
* Throttle writing to once every CRNG_RESEED_INTERVAL, unless
* we're not yet initialized.
* Throttle writing to once every reseed interval, unless we're not yet
* initialized or no entropy is credited.
*/
if (!kthread_should_stop() && crng_ready())
schedule_timeout_interruptible(CRNG_RESEED_INTERVAL);
if (!kthread_should_stop() && (crng_ready() || !entropy))
schedule_timeout_interruptible(crng_reseed_interval());
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);

Expand Down Expand Up @@ -920,20 +928,23 @@ EXPORT_SYMBOL_GPL(unregister_random_vmfork_notifier);
#endif

struct fast_pool {
struct work_struct mix;
unsigned long pool[4];
unsigned long last;
unsigned int count;
struct timer_list mix;
};

static void mix_interrupt_randomness(struct timer_list *work);

static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = {
#ifdef CONFIG_64BIT
#define FASTMIX_PERM SIPHASH_PERMUTATION
.pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 }
.pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 },
#else
#define FASTMIX_PERM HSIPHASH_PERMUTATION
.pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 }
.pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 },
#endif
.mix = __TIMER_INITIALIZER(mix_interrupt_randomness, 0)
};

/*
Expand Down Expand Up @@ -975,7 +986,7 @@ int __cold random_online_cpu(unsigned int cpu)
}
#endif

static void mix_interrupt_randomness(struct work_struct *work)
static void mix_interrupt_randomness(struct timer_list *work)
{
struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
/*
Expand Down Expand Up @@ -1006,7 +1017,7 @@ static void mix_interrupt_randomness(struct work_struct *work)
local_irq_enable();

mix_pool_bytes(pool, sizeof(pool));
credit_init_bits(max(1u, (count & U16_MAX) / 64));
credit_init_bits(clamp_t(unsigned int, (count & U16_MAX) / 64, 1, sizeof(pool) * 8));

memzero_explicit(pool, sizeof(pool));
}
Expand All @@ -1029,10 +1040,11 @@ void add_interrupt_randomness(int irq)
if (new_count < 1024 && !time_is_before_jiffies(fast_pool->last + HZ))
return;

if (unlikely(!fast_pool->mix.func))
INIT_WORK(&fast_pool->mix, mix_interrupt_randomness);
fast_pool->count |= MIX_INFLIGHT;
queue_work_on(raw_smp_processor_id(), system_highpri_wq, &fast_pool->mix);
if (!timer_pending(&fast_pool->mix)) {
fast_pool->mix.expires = jiffies;
add_timer_on(&fast_pool->mix, raw_smp_processor_id());
}
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);

Expand Down Expand Up @@ -1347,6 +1359,11 @@ static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
{
int ret;

if (!crng_ready() &&
((kiocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO)) ||
(kiocb->ki_filp->f_flags & O_NONBLOCK)))
return -EAGAIN;

ret = wait_for_random_bytes();
if (ret != 0)
return ret;
Expand Down
3 changes: 2 additions & 1 deletion include/linux/random.h
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,8 @@ static inline unsigned long get_random_canary(void)
return get_random_long() & CANARY_MASK;
}

int __init random_init(const char *command_line);
void __init random_init_early(const char *command_line);
void __init random_init(void);
bool rng_is_initialized(void);
int wait_for_random_bytes(void);

Expand Down
17 changes: 8 additions & 9 deletions init/main.c
Original file line number Diff line number Diff line change
Expand Up @@ -976,6 +976,9 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
parse_args("Setting extra init args", extra_init_args,
NULL, 0, -1, -1, NULL, set_init_arg);

/* Architectural and non-timekeeping rng init, before allocator init */
random_init_early(command_line);

/*
* These use large bootmem allocations and must precede
* kmem_cache_init()
Expand Down Expand Up @@ -1035,17 +1038,13 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
hrtimers_init();
softirq_init();
timekeeping_init();
kfence_init();
time_init();

/*
* For best initial stack canary entropy, prepare it after:
* - setup_arch() for any UEFI RNG entropy and boot cmdline access
* - timekeeping_init() for ktime entropy used in random_init()
* - time_init() for making random_get_entropy() work on some platforms
* - random_init() to initialize the RNG from from early entropy sources
*/
random_init(command_line);
/* This must be after timekeeping is initialized */
random_init();

/* These make use of the fully initialized rng */
kfence_init();
boot_init_stack_canary();

perf_event_init();
Expand Down
2 changes: 2 additions & 0 deletions kernel/sys.c
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/random.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
Expand Down Expand Up @@ -1366,6 +1367,7 @@ SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
if (!copy_from_user(tmp, name, len)) {
struct new_utsname *u;

add_device_randomness(tmp, len);
down_write(&uts_sem);
u = utsname();
memcpy(u->nodename, tmp, len);
Expand Down
2 changes: 2 additions & 0 deletions kernel/utsname_sysctl.c
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
#include <linux/export.h>
#include <linux/uts.h>
#include <linux/utsname.h>
#include <linux/random.h>
#include <linux/sysctl.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
Expand Down Expand Up @@ -57,6 +58,7 @@ static int proc_do_uts_string(struct ctl_table *table, int write,
* theoretically be incorrect if there are two parallel writes
* at non-zero offsets to the same sysctl.
*/
add_device_randomness(tmp_data, sizeof(tmp_data));
down_write(&uts_sem);
memcpy(get_uts(table), tmp_data, sizeof(tmp_data));
up_write(&uts_sem);
Expand Down
2 changes: 1 addition & 1 deletion mm/kfence/core.c
Original file line number Diff line number Diff line change
Expand Up @@ -864,7 +864,7 @@ static void kfence_init_enable(void)

void __init kfence_init(void)
{
stack_hash_seed = (u32)random_get_entropy();
stack_hash_seed = get_random_u32();

/* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
if (!kfence_sample_interval)
Expand Down

0 comments on commit 6aa1989

Please sign in to comment.