random: group crng functions
drivers/char/random.c
// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
 * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All
 * rights reserved.
 */

/*
 * Exported interfaces ---- output
 * ===============================
 *
 * There are four exported interfaces; two for use within the kernel,
 * and two for use from userspace.
 *
 * Exported interfaces ---- userspace output
 * -----------------------------------------
 *
 * The userspace interfaces are two character devices /dev/random and
 * /dev/urandom. /dev/random is suitable for use when very high
 * quality randomness is desired (for example, for key generation or
 * one-time pads), as it will only return a maximum of the number of
 * bits of randomness (as estimated by the random number generator)
 * contained in the entropy pool.
 *
 * The /dev/urandom device does not have this limit, and will return
 * as many bytes as are requested. As more and more random bytes are
 * requested without giving time for the entropy pool to recharge,
 * this will result in random numbers that are merely cryptographically
 * strong. For many applications, however, this is acceptable.
 *
 * Exported interfaces ---- kernel output
 * --------------------------------------
 *
 * The primary kernel interfaces are:
 *
 *	void get_random_bytes(void *buf, size_t nbytes);
 *	u32 get_random_u32()
 *	u64 get_random_u64()
 *	unsigned int get_random_int()
 *	unsigned long get_random_long()
 *
 * These interfaces will return the requested number of random bytes
 * into the given buffer or as a return value. This is equivalent to a
 * read from /dev/urandom. The get_random_{u32,u64,int,long}() family
 * of functions may be higher performance for one-off random integers,
 * because they do a bit of buffering.
 *
 * prandom_u32()
 * -------------
 *
 * For even weaker applications, see the pseudorandom generator
 * prandom_u32(), prandom_max(), and prandom_bytes(). If the random
 * numbers aren't security-critical at all, these are *far* cheaper.
 * Useful for self-tests, random error simulation, randomized backoffs,
 * and any other application where you trust that nobody is trying to
 * maliciously mess with you by guessing the "random" numbers.
 *
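 * As a quick sketch of the two kernel output styles (illustrative
 * only, not part of the interface list above):
 *
 *	u8 key[16];
 *	get_random_bytes(key, sizeof(key));	// buffer-filling interface
 *	u32 token = get_random_u32();		// batched integer interface
 *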
 * Exported interfaces ---- input
 * ==============================
 *
 * The current exported interfaces for gathering environmental noise
 * from the devices are:
 *
 *	void add_device_randomness(const void *buf, size_t size);
 *	void add_input_randomness(unsigned int type, unsigned int code,
 *				  unsigned int value);
 *	void add_interrupt_randomness(int irq);
 *	void add_disk_randomness(struct gendisk *disk);
 *	void add_hwgenerator_randomness(const void *buffer, size_t count,
 *					size_t entropy);
 *	void add_bootloader_randomness(const void *buf, size_t size);
 *
 * add_device_randomness() is for adding data to the random pool that
 * is likely to differ between two devices (or possibly even per boot).
 * This would be things like MAC addresses or serial numbers, or the
 * read-out of the RTC. This does *not* add any actual entropy to the
 * pool, but it initializes the pool to different values for devices
 * that might otherwise be identical and have very little entropy
 * available to them (particularly common in the embedded world).
 *
 * add_input_randomness() uses the input layer interrupt timing, as well as
 * the event type information from the hardware.
 *
 * add_interrupt_randomness() uses the interrupt timing as random
 * inputs to the entropy pool. Using the cycle counters and the irq source
 * as inputs, it feeds the randomness roughly once a second.
 *
 * add_disk_randomness() uses what amounts to the seek time of block
 * layer request events, on a per-disk_devt basis, as input to the
 * entropy pool. Note that high-speed solid state drives with very low
 * seek times do not make for good sources of entropy, as their seek
 * times are usually fairly consistent.
 *
 * All of these routines try to estimate how many bits of randomness a
 * particular randomness source contributes. They do this by keeping
 * track of the first and second order deltas of the event timings.
 *
 * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
 * entropy as specified by the caller. If the entropy pool is full it will
 * block until more entropy is needed.
 *
 * add_bootloader_randomness() is the same as add_hwgenerator_randomness() or
 * add_device_randomness(), depending on whether or not the configuration
 * option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
 *
 * Ensuring unpredictability at system startup
 * ============================================
 *
 * When any operating system starts up, it will go through a sequence
 * of actions that are fairly predictable by an adversary, especially
 * if the start-up does not involve interaction with a human operator.
 * This reduces the actual number of bits of unpredictability in the
 * entropy pool below the value in entropy_count. In order to
 * counteract this effect, it helps to carry information in the
 * entropy pool across shut-downs and start-ups. To do this, put the
 * following lines in an appropriate script which is run during the boot
 * sequence:
 *
 *	echo "Initializing random number generator..."
 *	random_seed=/var/run/random-seed
 *	# Carry a random seed from start-up to start-up
 *	# Load and then save the whole entropy pool
 *	if [ -f $random_seed ]; then
 *		cat $random_seed >/dev/urandom
 *	else
 *		touch $random_seed
 *	fi
 *	chmod 600 $random_seed
 *	dd if=/dev/urandom of=$random_seed count=1 bs=512
 *
 * and the following lines in an appropriate script which is run as
 * the system is shutdown:
 *
 *	# Carry a random seed from shut-down to start-up
 *	# Save the whole entropy pool
 *	echo "Saving random seed..."
 *	random_seed=/var/run/random-seed
 *	touch $random_seed
 *	chmod 600 $random_seed
 *	dd if=/dev/urandom of=$random_seed count=1 bs=512
 *
 * For example, on most modern systems using the System V init
 * scripts, such code fragments would be found in
 * /etc/rc.d/init.d/random. On older Linux systems, the correct script
 * location might be in /etc/rc.d/rc.local or /etc/rc.d/rc.0.
 *
 * Effectively, these commands cause the contents of the entropy pool
 * to be saved at shut-down time and reloaded into the entropy pool at
 * start-up. (The 'dd' in the addition to the bootup script is to
 * make sure that /etc/random-seed is different for every start-up,
 * even if the system crashes without executing rc.0.) Even with
 * complete knowledge of the start-up activities, predicting the state
 * of the entropy pool requires knowledge of the previous history of
 * the system.
 *
 * Configuring the /dev/random driver under Linux
 * ==============================================
 *
 * The /dev/random driver under Linux uses minor numbers 8 and 9 of
 * the /dev/mem major number (#1). So if your system does not have
 * /dev/random and /dev/urandom created already, they can be created
 * by using the commands:
 *
 *	mknod /dev/random c 1 8
 *	mknod /dev/urandom c 1 9
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
#include <linux/uaccess.h>
#include <crypto/chacha.h>
#include <crypto/blake2s.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/io.h>

/*********************************************************************
 *
 * Initialization and readiness waiting.
 *
 * Much of the RNG infrastructure is devoted to various dependencies
 * being able to wait until the RNG has collected enough entropy and
 * is ready for safe consumption.
 *
 *********************************************************************/

/*
 * crng_init =  0 --> Uninitialized
 *		1 --> Initialized
 *		2 --> Initialized from input_pool
 *
 * crng_init is protected by base_crng->lock, and only increases
 * its value (from 0->1->2).
 */
static int crng_init = 0;
#define crng_ready() (likely(crng_init > 1))
/* Various types of waiters for crng_init->2 transition. */
static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
static struct fasync_struct *fasync;
static DEFINE_SPINLOCK(random_ready_list_lock);
static LIST_HEAD(random_ready_list);

/* Control how we warn userspace. */
static struct ratelimit_state unseeded_warning =
	RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
static struct ratelimit_state urandom_warning =
	RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
static int ratelimit_disable __read_mostly;
module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");

/*
 * Returns whether or not the input pool has been seeded and thus guaranteed
 * to supply cryptographically secure random numbers. This applies to: the
 * /dev/urandom device, the get_random_bytes function, and the
 * get_random_{u32,u64,int,long} family of functions.
 *
 * Returns: true if the input pool has been seeded.
 *          false if the input pool has not been seeded.
 */
bool rng_is_initialized(void)
{
	return crng_ready();
}
EXPORT_SYMBOL(rng_is_initialized);

/* Used by wait_for_random_bytes(), and considered an entropy collector, below. */
static void try_to_generate_entropy(void);

/*
 * Wait for the input pool to be seeded and thus guaranteed to supply
 * cryptographically secure random numbers. This applies to: the /dev/urandom
 * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
 * family of functions. Using any of these functions without first calling
 * this function forfeits the guarantee of security.
 *
 * Returns: 0 if the input pool has been seeded.
 *          -ERESTARTSYS if the function was interrupted by a signal.
 */
int wait_for_random_bytes(void)
{
	if (likely(crng_ready()))
		return 0;

	do {
		int ret;
		ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
		if (ret)
			return ret > 0 ? 0 : ret;

		try_to_generate_entropy();
	} while (!crng_ready());

	return 0;
}
EXPORT_SYMBOL(wait_for_random_bytes);
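
/*
 * A minimal usage sketch (illustrative only; my_driver_gen_key() and
 * its arguments are hypothetical, not part of this file): a driver
 * that needs key material at init time should first wait for the pool
 * to be seeded, then draw bytes.
 *
 *	static int my_driver_gen_key(u8 *key, size_t len)
 *	{
 *		int ret = wait_for_random_bytes();
 *		if (ret)
 *			return ret;	// interrupted by a signal
 *		get_random_bytes(key, len);
 *		return 0;
 *	}
 */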

/*
 * Add a callback function that will be invoked when the input
 * pool is initialised.
 *
 * returns: 0 if callback is successfully added
 *	    -EALREADY if pool is already initialised (callback not called)
 *	    -ENOENT if module for callback is not alive
 */
int add_random_ready_callback(struct random_ready_callback *rdy)
{
	struct module *owner;
	unsigned long flags;
	int err = -EALREADY;

	if (crng_ready())
		return err;

	owner = rdy->owner;
	if (!try_module_get(owner))
		return -ENOENT;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	if (crng_ready())
		goto out;

	owner = NULL;

	list_add(&rdy->list, &random_ready_list);
	err = 0;

out:
	spin_unlock_irqrestore(&random_ready_list_lock, flags);

	module_put(owner);

	return err;
}
EXPORT_SYMBOL(add_random_ready_callback);
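
/*
 * A minimal registration sketch (illustrative only; "my_rdy" and
 * my_seeded() are hypothetical): embed the callback struct in your
 * driver, register it, and fall back to immediate use if the pool is
 * already initialised.
 *
 *	static void my_seeded(struct random_ready_callback *rdy)
 *	{
 *		// RNG is now seeded; safe to call get_random_bytes().
 *	}
 *	static struct random_ready_callback my_rdy = {
 *		.owner = THIS_MODULE,
 *		.func = my_seeded,
 *	};
 *	...
 *	err = add_random_ready_callback(&my_rdy);
 *	if (err == -EALREADY)
 *		my_seeded(&my_rdy);	// pool was already seeded
 */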

/*
 * Delete a previously registered readiness callback function.
 */
void del_random_ready_callback(struct random_ready_callback *rdy)
{
	unsigned long flags;
	struct module *owner = NULL;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	if (!list_empty(&rdy->list)) {
		list_del_init(&rdy->list);
		owner = rdy->owner;
	}
	spin_unlock_irqrestore(&random_ready_list_lock, flags);

	module_put(owner);
}
EXPORT_SYMBOL(del_random_ready_callback);

static void process_random_ready_list(void)
{
	unsigned long flags;
	struct random_ready_callback *rdy, *tmp;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
		struct module *owner = rdy->owner;

		list_del_init(&rdy->list);
		rdy->func(rdy);
		module_put(owner);
	}
	spin_unlock_irqrestore(&random_ready_list_lock, flags);
}

#define warn_unseeded_randomness(previous) \
	_warn_unseeded_randomness(__func__, (void *)_RET_IP_, (previous))

static void _warn_unseeded_randomness(const char *func_name, void *caller, void **previous)
{
#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
	const bool print_once = false;
#else
	static bool print_once __read_mostly;
#endif

	if (print_once || crng_ready() ||
	    (previous && (caller == READ_ONCE(*previous))))
		return;
	WRITE_ONCE(*previous, caller);
#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
	print_once = true;
#endif
	if (__ratelimit(&unseeded_warning))
		printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n",
				func_name, caller, crng_init);
}

/*********************************************************************
 *
 * Fast key erasure RNG, the "crng".
 *
 * These functions expand entropy from the entropy extractor into
 * long streams for external consumption using the "fast key erasure"
 * RNG described at <https://blog.cr.yp.to/20170723-random.html>.
 *
 * There are a few exported interfaces for use by other drivers:
 *
 *	void get_random_bytes(void *buf, size_t nbytes)
 *	u32 get_random_u32()
 *	u64 get_random_u64()
 *	unsigned int get_random_int()
 *	unsigned long get_random_long()
 *
 * These interfaces will return the requested number of random bytes
 * into the given buffer or as a return value. This is equivalent to
 * a read from /dev/urandom. The integer family of functions may be
 * higher performance for one-off random integers, because they do a
 * bit of buffering.
 *
 *********************************************************************/

enum {
	CRNG_RESEED_INTERVAL = 300 * HZ,
	CRNG_INIT_CNT_THRESH = 2 * CHACHA_KEY_SIZE
};

static struct {
	u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long));
	unsigned long birth;
	unsigned long generation;
	spinlock_t lock;
} base_crng = {
	.lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
};

struct crng {
	u8 key[CHACHA_KEY_SIZE];
	unsigned long generation;
	local_lock_t lock;
};

static DEFINE_PER_CPU(struct crng, crngs) = {
	.generation = ULONG_MAX,
	.lock = INIT_LOCAL_LOCK(crngs.lock),
};

/* Used by crng_reseed() to extract a new seed from the input pool. */
static bool drain_entropy(void *buf, size_t nbytes);

/*
 * This extracts a new crng key from the input pool, but only if there is a
 * sufficient amount of entropy available, in order to mitigate bruteforcing
 * of newly added bits.
 */
static void crng_reseed(void)
{
	unsigned long flags;
	unsigned long next_gen;
	u8 key[CHACHA_KEY_SIZE];
	bool finalize_init = false;

	/* Only reseed if we can, to prevent brute forcing a small amount of new bits. */
	if (!drain_entropy(key, sizeof(key)))
		return;

	/*
	 * We copy the new key into the base_crng, overwriting the old one,
	 * and update the generation counter. We avoid hitting ULONG_MAX,
	 * because the per-cpu crngs are initialized to ULONG_MAX, so this
	 * forces new CPUs that come online to always initialize.
	 */
	spin_lock_irqsave(&base_crng.lock, flags);
	memcpy(base_crng.key, key, sizeof(base_crng.key));
	next_gen = base_crng.generation + 1;
	if (next_gen == ULONG_MAX)
		++next_gen;
	WRITE_ONCE(base_crng.generation, next_gen);
	WRITE_ONCE(base_crng.birth, jiffies);
	if (crng_init < 2) {
		crng_init = 2;
		finalize_init = true;
	}
	spin_unlock_irqrestore(&base_crng.lock, flags);
	memzero_explicit(key, sizeof(key));
	if (finalize_init) {
		process_random_ready_list();
		wake_up_interruptible(&crng_init_wait);
		kill_fasync(&fasync, SIGIO, POLL_IN);
		pr_notice("crng init done\n");
		if (unseeded_warning.missed) {
			pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
				  unseeded_warning.missed);
			unseeded_warning.missed = 0;
		}
		if (urandom_warning.missed) {
			pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
				  urandom_warning.missed);
			urandom_warning.missed = 0;
		}
	}
}

/*
 * This generates a ChaCha block using the provided key, and then
 * immediately overwrites that key with half the block. It returns
 * the resultant ChaCha state to the user, along with the second
 * half of the block containing 32 bytes of random data that may
 * be used; random_data_len may not be greater than 32.
 */
static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
				  u32 chacha_state[CHACHA_STATE_WORDS],
				  u8 *random_data, size_t random_data_len)
{
	u8 first_block[CHACHA_BLOCK_SIZE];

	BUG_ON(random_data_len > 32);

	chacha_init_consts(chacha_state);
	memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE);
	memset(&chacha_state[12], 0, sizeof(u32) * 4);
	chacha20_block(chacha_state, first_block);

	memcpy(key, first_block, CHACHA_KEY_SIZE);
	memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
	memzero_explicit(first_block, sizeof(first_block));
}

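/*
 * Illustrative layout of the one generated block above (a sketch of
 * the scheme, not additional code):
 *
 *	first_block[0..31]  -> new value of *key (erases the old key)
 *	first_block[32..63] -> random_data handed to the caller
 *
 * Because the key is overwritten before any output leaves this
 * function, a later compromise of the key cannot be "rewound" to
 * recover output that was already produced -- the property the fast
 * key erasure construction is named for.
 */
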
/*
 * This function returns a ChaCha state that you may use for generating
 * random data. It also returns up to 32 bytes on its own of random data
 * that may be used; random_data_len may not be greater than 32.
 */
static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
			    u8 *random_data, size_t random_data_len)
{
	unsigned long flags;
	struct crng *crng;

	BUG_ON(random_data_len > 32);

	/*
	 * For the fast path, we check whether we're ready, unlocked first, and
	 * then re-check once locked later. In the case where we're really not
	 * ready, we do fast key erasure with the base_crng directly, because
	 * this is what crng_{fast,slow}_load mutate during early init.
	 */
	if (unlikely(!crng_ready())) {
		bool ready;

		spin_lock_irqsave(&base_crng.lock, flags);
		ready = crng_ready();
		if (!ready)
			crng_fast_key_erasure(base_crng.key, chacha_state,
					      random_data, random_data_len);
		spin_unlock_irqrestore(&base_crng.lock, flags);
		if (!ready)
			return;
	}

	/*
	 * If the base_crng is more than 5 minutes old, we reseed, which
	 * in turn bumps the generation counter that we check below.
	 */
	if (unlikely(time_after(jiffies, READ_ONCE(base_crng.birth) + CRNG_RESEED_INTERVAL)))
		crng_reseed();

	local_lock_irqsave(&crngs.lock, flags);
	crng = raw_cpu_ptr(&crngs);

	/*
	 * If our per-cpu crng is older than the base_crng, then it means
	 * somebody reseeded the base_crng. In that case, we do fast key
	 * erasure on the base_crng, and use its output as the new key
	 * for our per-cpu crng. This brings us up to date with base_crng.
	 */
	if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) {
		spin_lock(&base_crng.lock);
		crng_fast_key_erasure(base_crng.key, chacha_state,
				      crng->key, sizeof(crng->key));
		crng->generation = base_crng.generation;
		spin_unlock(&base_crng.lock);
	}

	/*
	 * Finally, when we've made it this far, our per-cpu crng has an up
	 * to date key, and we can do fast key erasure with it to produce
	 * some random data and a ChaCha state for the caller. All other
	 * branches of this function are "unlikely", so most of the time we
	 * should wind up here immediately.
	 */
	crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len);
	local_unlock_irqrestore(&crngs.lock, flags);
}

/*
 * This function is for crng_init == 0 only.
 *
 * crng_fast_load() can be called by code in the interrupt service
 * path. So we can't afford to dilly-dally. Returns the number of
 * bytes processed from cp.
 */
static size_t crng_fast_load(const void *cp, size_t len)
{
	static int crng_init_cnt = 0;
	unsigned long flags;
	const u8 *src = (const u8 *)cp;
	size_t ret = 0;

	if (!spin_trylock_irqsave(&base_crng.lock, flags))
		return 0;
	if (crng_init != 0) {
		spin_unlock_irqrestore(&base_crng.lock, flags);
		return 0;
	}
	while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
		base_crng.key[crng_init_cnt % sizeof(base_crng.key)] ^= *src;
		src++; crng_init_cnt++; len--; ret++;
	}
	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
		++base_crng.generation;
		crng_init = 1;
	}
	spin_unlock_irqrestore(&base_crng.lock, flags);
	if (crng_init == 1)
		pr_notice("fast init done\n");
	return ret;
}

/*
 * This function is for crng_init == 0 only.
 *
 * crng_slow_load() is called by add_device_randomness, which has two
 * attributes. (1) We can't trust that the buffer passed to it is
 * guaranteed to be unpredictable (so it might not have any entropy at
 * all), and (2) it doesn't have the performance constraints of
 * crng_fast_load().
 *
 * So, we simply hash the contents in with the current key. Finally,
 * we do *not* advance crng_init_cnt, since the buffer we get may be
 * something like a fixed DMI table (for example), which might very
 * well be unique to the machine, but is otherwise unvarying.
 */
static void crng_slow_load(const void *cp, size_t len)
{
	unsigned long flags;
	struct blake2s_state hash;

	blake2s_init(&hash, sizeof(base_crng.key));

	if (!spin_trylock_irqsave(&base_crng.lock, flags))
		return;
	if (crng_init != 0) {
		spin_unlock_irqrestore(&base_crng.lock, flags);
		return;
	}

	blake2s_update(&hash, base_crng.key, sizeof(base_crng.key));
	blake2s_update(&hash, cp, len);
	blake2s_final(&hash, base_crng.key);

	spin_unlock_irqrestore(&base_crng.lock, flags);
}

static void _get_random_bytes(void *buf, size_t nbytes)
{
	u32 chacha_state[CHACHA_STATE_WORDS];
	u8 tmp[CHACHA_BLOCK_SIZE];
	size_t len;

	if (!nbytes)
		return;

	len = min_t(size_t, 32, nbytes);
	crng_make_state(chacha_state, buf, len);
	nbytes -= len;
	buf += len;

	while (nbytes) {
		if (nbytes < CHACHA_BLOCK_SIZE) {
			chacha20_block(chacha_state, tmp);
			memcpy(buf, tmp, nbytes);
			memzero_explicit(tmp, sizeof(tmp));
			break;
		}

		chacha20_block(chacha_state, buf);
		if (unlikely(chacha_state[12] == 0))
			++chacha_state[13];
		nbytes -= CHACHA_BLOCK_SIZE;
		buf += CHACHA_BLOCK_SIZE;
	}

	memzero_explicit(chacha_state, sizeof(chacha_state));
}

/*
 * This function is the exported kernel interface. It returns some
 * number of good random numbers, suitable for key generation, seeding
 * TCP sequence numbers, etc. It does not rely on the hardware random
 * number generator. For random bytes direct from the hardware RNG
 * (when available), use get_random_bytes_arch(). In order to ensure
 * that the randomness provided by this function is okay, the function
 * wait_for_random_bytes() should be called and return 0 at least once
 * at any point prior.
 */
void get_random_bytes(void *buf, size_t nbytes)
{
	static void *previous;

	warn_unseeded_randomness(&previous);
	_get_random_bytes(buf, nbytes);
}
EXPORT_SYMBOL(get_random_bytes);

static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
{
	bool large_request = nbytes > 256;
	ssize_t ret = 0;
	size_t len;
	u32 chacha_state[CHACHA_STATE_WORDS];
	u8 output[CHACHA_BLOCK_SIZE];

	if (!nbytes)
		return 0;

	len = min_t(size_t, 32, nbytes);
	crng_make_state(chacha_state, output, len);

	if (copy_to_user(buf, output, len))
		return -EFAULT;
	nbytes -= len;
	buf += len;
	ret += len;

	while (nbytes) {
		if (large_request && need_resched()) {
			if (signal_pending(current))
				break;
			schedule();
		}

		chacha20_block(chacha_state, output);
		if (unlikely(chacha_state[12] == 0))
			++chacha_state[13];

		len = min_t(size_t, nbytes, CHACHA_BLOCK_SIZE);
		if (copy_to_user(buf, output, len)) {
			ret = -EFAULT;
			break;
		}

		nbytes -= len;
		buf += len;
		ret += len;
	}

	memzero_explicit(chacha_state, sizeof(chacha_state));
	memzero_explicit(output, sizeof(output));
	return ret;
}

/*
 * Batched entropy returns random integers. The quality of the random
 * number is as good as /dev/urandom. In order to ensure that the
 * randomness provided by this function is okay, the function
 * wait_for_random_bytes() should be called and return 0 at least once
 * at any point prior.
 */
struct batched_entropy {
	union {
		/*
		 * We make this 1.5x a ChaCha block, so that we get the
		 * remaining 32 bytes from fast key erasure, plus one full
		 * block from the detached ChaCha state. We can increase
		 * the size of this later if needed so long as we keep the
		 * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE.
		 */
		u64 entropy_u64[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u64))];
		u32 entropy_u32[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u32))];
	};
	local_lock_t lock;
	unsigned long generation;
	unsigned int position;
};

static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
	.lock = INIT_LOCAL_LOCK(batched_entropy_u64.lock),
	.position = UINT_MAX
};

u64 get_random_u64(void)
{
	u64 ret;
	unsigned long flags;
	struct batched_entropy *batch;
	static void *previous;
	unsigned long next_gen;

	warn_unseeded_randomness(&previous);

	local_lock_irqsave(&batched_entropy_u64.lock, flags);
	batch = raw_cpu_ptr(&batched_entropy_u64);

	next_gen = READ_ONCE(base_crng.generation);
	if (batch->position >= ARRAY_SIZE(batch->entropy_u64) ||
	    next_gen != batch->generation) {
		_get_random_bytes(batch->entropy_u64, sizeof(batch->entropy_u64));
		batch->position = 0;
		batch->generation = next_gen;
	}

	ret = batch->entropy_u64[batch->position];
	batch->entropy_u64[batch->position] = 0;
	++batch->position;
	local_unlock_irqrestore(&batched_entropy_u64.lock, flags);
	return ret;
}
EXPORT_SYMBOL(get_random_u64);

static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
	.lock = INIT_LOCAL_LOCK(batched_entropy_u32.lock),
	.position = UINT_MAX
};

u32 get_random_u32(void)
{
	u32 ret;
	unsigned long flags;
	struct batched_entropy *batch;
	static void *previous;
	unsigned long next_gen;

	warn_unseeded_randomness(&previous);

	local_lock_irqsave(&batched_entropy_u32.lock, flags);
	batch = raw_cpu_ptr(&batched_entropy_u32);

	next_gen = READ_ONCE(base_crng.generation);
	if (batch->position >= ARRAY_SIZE(batch->entropy_u32) ||
	    next_gen != batch->generation) {
		_get_random_bytes(batch->entropy_u32, sizeof(batch->entropy_u32));
		batch->position = 0;
		batch->generation = next_gen;
	}

	ret = batch->entropy_u32[batch->position];
	batch->entropy_u32[batch->position] = 0;
	++batch->position;
	local_unlock_irqrestore(&batched_entropy_u32.lock, flags);
	return ret;
}
EXPORT_SYMBOL(get_random_u32);
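
/*
 * A minimal usage sketch (illustrative only): picking a random index
 * below some bound. The multiply-shift trick shown here is one common
 * way to reduce a u32 to a range; it avoids the heavier bias of
 * "% bound", though a tiny bias remains for non-power-of-two bounds
 * unless rejection sampling is added.
 *
 *	u32 idx = ((u64)get_random_u32() * bound) >> 32;
 */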

/**
 * randomize_page - Generate a random, page aligned address
 * @start:	The smallest acceptable address the caller will take.
 * @range:	The size of the area, starting at @start, within which the
 *		random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 * @start was already page aligned. We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range). On error,
 * @start is returned.
 */
unsigned long randomize_page(unsigned long start, unsigned long range)
{
	if (!PAGE_ALIGNED(start)) {
		range -= PAGE_ALIGN(start) - start;
		start = PAGE_ALIGN(start);
	}

	if (start > ULONG_MAX - range)
		range = ULONG_MAX - start;

	range >>= PAGE_SHIFT;

	if (range == 0)
		return start;

	return start + (get_random_long() % range << PAGE_SHIFT);
}

/*
 * This function will use the architecture-specific hardware random
 * number generator if it is available. It is not recommended for
 * use. Use get_random_bytes() instead. It returns the number of
 * bytes filled in.
 */
size_t __must_check get_random_bytes_arch(void *buf, size_t nbytes)
{
	size_t left = nbytes;
	u8 *p = buf;

	while (left) {
		unsigned long v;
		size_t chunk = min_t(size_t, left, sizeof(unsigned long));

		if (!arch_get_random_long(&v))
			break;

		memcpy(p, &v, chunk);
		p += chunk;
		left -= chunk;
	}

	return nbytes - left;
}
EXPORT_SYMBOL(get_random_bytes_arch);

enum {
	POOL_BITS = BLAKE2S_HASH_SIZE * 8,
	POOL_MIN_BITS = POOL_BITS /* No point in settling for less. */
};

/*
 * Static global variables
 */
static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);

/**********************************************************************
 *
 * OS independent entropy store. Here are the functions which handle
 * storing entropy in an entropy pool.
 *
 **********************************************************************/

static struct {
	struct blake2s_state hash;
	spinlock_t lock;
	unsigned int entropy_count;
} input_pool = {
	.hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
		    BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4,
		    BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 },
	.hash.outlen = BLAKE2S_HASH_SIZE,
	.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
};

static void extract_entropy(void *buf, size_t nbytes);
static bool drain_entropy(void *buf, size_t nbytes);

static void crng_reseed(void);

/*
 * This function adds bytes into the entropy "pool". It does not
 * update the entropy estimate. The caller should call
 * credit_entropy_bits if this is appropriate.
 */
static void _mix_pool_bytes(const void *in, size_t nbytes)
{
	blake2s_update(&input_pool.hash, in, nbytes);
}

static void mix_pool_bytes(const void *in, size_t nbytes)
{
	unsigned long flags;

	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(in, nbytes);
	spin_unlock_irqrestore(&input_pool.lock, flags);
}

struct fast_pool {
	union {
		u32 pool32[4];
		u64 pool64[2];
	};
	unsigned long last;
	u16 reg_idx;
	u8 count;
};

/*
 * This is a fast mixing routine used by the interrupt randomness
 * collector. It's hardcoded for a 128 bit pool and assumes that any
 * locks that might be needed are taken by the caller.
 */
static void fast_mix(u32 pool[4])
{
	u32 a = pool[0],	b = pool[1];
	u32 c = pool[2],	d = pool[3];

	a += b;			c += d;
	b = rol32(b, 6);	d = rol32(d, 27);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 16);	d = rol32(d, 14);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 6);	d = rol32(d, 27);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 16);	d = rol32(d, 14);
	d ^= a;			b ^= c;

	pool[0] = a;		pool[1] = b;
	pool[2] = c;		pool[3] = d;
}

static void credit_entropy_bits(size_t nbits)
{
	unsigned int entropy_count, orig, add;

	if (!nbits)
		return;

	add = min_t(size_t, nbits, POOL_BITS);

	do {
		orig = READ_ONCE(input_pool.entropy_count);
		entropy_count = min_t(unsigned int, POOL_BITS, orig + add);
	} while (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig);

	if (crng_init < 2 && entropy_count >= POOL_MIN_BITS)
		crng_reseed();
}

/*********************************************************************
 *
 * Entropy input management
 *
 *********************************************************************/

/* There is one of these per entropy source */
struct timer_rand_state {
	cycles_t last_time;
	long last_delta, last_delta2;
};

#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, };

/*
 * Add device- or boot-specific data to the input pool to help
 * initialize it.
 *
 * None of this adds any entropy; it is meant to avoid the problem of
 * the entropy pool having similar initial state across largely
 * identical devices.
 */
void add_device_randomness(const void *buf, size_t size)
{
	unsigned long time = random_get_entropy() ^ jiffies;
	unsigned long flags;

	if (!crng_ready() && size)
		crng_slow_load(buf, size);

	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(buf, size);
	_mix_pool_bytes(&time, sizeof(time));
	spin_unlock_irqrestore(&input_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);
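
/*
 * A minimal usage sketch (illustrative only; the caller shown is
 * hypothetical): platform and network code typically feed in
 * identifiers that differ per device but carry no creditable entropy,
 * e.g. a MAC address read out during probe.
 *
 *	add_device_randomness(netdev->dev_addr, ETH_ALEN);
 *	add_device_randomness(&serial_number, sizeof(serial_number));
 */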

static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;

/*
 * This function adds entropy to the entropy "pool" by using timing
 * delays. It uses the timer_rand_state structure to make an estimate
 * of how many bits of entropy this call has added to the pool.
 *
 * The number "num" is also added to the pool - it should somehow describe
 * the type of event which just happened. This is currently 0-255 for
 * keyboard scan codes, and 256 upwards for interrupts.
 */
static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
{
	struct {
		long jiffies;
		unsigned int cycles;
		unsigned int num;
	} sample;
	long delta, delta2, delta3;

	sample.jiffies = jiffies;
	sample.cycles = random_get_entropy();
	sample.num = num;
	mix_pool_bytes(&sample, sizeof(sample));

	/*
	 * Calculate number of bits of randomness we probably added.
	 * We take into account the first, second and third-order deltas
	 * in order to make our estimate.
	 */
	delta = sample.jiffies - READ_ONCE(state->last_time);
	WRITE_ONCE(state->last_time, sample.jiffies);

	delta2 = delta - READ_ONCE(state->last_delta);
	WRITE_ONCE(state->last_delta, delta);

	delta3 = delta2 - READ_ONCE(state->last_delta2);
	WRITE_ONCE(state->last_delta2, delta2);

	if (delta < 0)
		delta = -delta;
	if (delta2 < 0)
		delta2 = -delta2;
	if (delta3 < 0)
		delta3 = -delta3;
	if (delta > delta2)
		delta = delta2;
	if (delta > delta3)
		delta = delta3;

	/*
	 * delta is now minimum absolute delta.
	 * Round down by 1 bit on general principles,
	 * and limit entropy estimate to 12 bits.
	 */
	credit_entropy_bits(min_t(unsigned int, fls(delta >> 1), 11));
}

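/*
 * A worked example of the estimate above (illustrative numbers): with
 * state last_time = 1012, last_delta = 12, last_delta2 = 4 and a new
 * event at jiffies = 1030, we get delta = 18, delta2 = 18 - 12 = 6,
 * and delta3 = 6 - 4 = 2. The minimum absolute delta is 2, so
 * fls(2 >> 1) = fls(1) = 1 bit is credited -- regular timings earn
 * almost nothing, while erratic ones earn up to the 11-bit cap.
 */
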
void add_input_randomness(unsigned int type, unsigned int code,
			  unsigned int value)
{
	static unsigned char last_value;

	/* ignore autorepeat and the like */
	if (value == last_value)
		return;

	last_value = value;
	add_timer_randomness(&input_timer_state,
			     (type << 4) ^ code ^ (code >> 4) ^ value);
}
EXPORT_SYMBOL_GPL(add_input_randomness);

static DEFINE_PER_CPU(struct fast_pool, irq_randomness);

static u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
{
	u32 *ptr = (u32 *)regs;
	unsigned int idx;

	if (regs == NULL)
		return 0;
	idx = READ_ONCE(f->reg_idx);
	if (idx >= sizeof(struct pt_regs) / sizeof(u32))
		idx = 0;
	ptr += idx++;
	WRITE_ONCE(f->reg_idx, idx);
	return *ptr;
}

void add_interrupt_randomness(int irq)
{
	struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
	struct pt_regs *regs = get_irq_regs();
	unsigned long now = jiffies;
	cycles_t cycles = random_get_entropy();

	if (cycles == 0)
		cycles = get_reg(fast_pool, regs);

	if (sizeof(cycles) == 8)
		fast_pool->pool64[0] ^= cycles ^ rol64(now, 32) ^ irq;
	else {
		fast_pool->pool32[0] ^= cycles ^ irq;
		fast_pool->pool32[1] ^= now;
	}

	if (sizeof(unsigned long) == 8)
		fast_pool->pool64[1] ^= regs ? instruction_pointer(regs) : _RET_IP_;
	else {
		fast_pool->pool32[2] ^= regs ? instruction_pointer(regs) : _RET_IP_;
		fast_pool->pool32[3] ^= get_reg(fast_pool, regs);
	}

	fast_mix(fast_pool->pool32);
	++fast_pool->count;

	if (unlikely(crng_init == 0)) {
		if (fast_pool->count >= 64 &&
		    crng_fast_load(fast_pool->pool32, sizeof(fast_pool->pool32)) > 0) {
			fast_pool->count = 0;
			fast_pool->last = now;
			if (spin_trylock(&input_pool.lock)) {
				_mix_pool_bytes(&fast_pool->pool32, sizeof(fast_pool->pool32));
				spin_unlock(&input_pool.lock);
			}
		}
		return;
	}

	if ((fast_pool->count < 64) && !time_after(now, fast_pool->last + HZ))
		return;

	if (!spin_trylock(&input_pool.lock))
		return;

	fast_pool->last = now;
	_mix_pool_bytes(&fast_pool->pool32, sizeof(fast_pool->pool32));
	spin_unlock(&input_pool.lock);

	fast_pool->count = 0;

	/* award one bit for the contents of the fast pool */
	credit_entropy_bits(1);
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);

#ifdef CONFIG_BLOCK
void add_disk_randomness(struct gendisk *disk)
{
	if (!disk || !disk->random)
		return;
	/* first major is 1, so we get >= 0x200 here */
	add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
}
EXPORT_SYMBOL_GPL(add_disk_randomness);
#endif

/*********************************************************************
 *
 * Entropy extraction routines
 *
 *********************************************************************/

/*
 * This is an HKDF-like construction for using the hashed collected entropy
 * as a PRF key, that's then expanded block-by-block.
 */
static void extract_entropy(void *buf, size_t nbytes)
{
	unsigned long flags;
	u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
	struct {
		unsigned long rdseed[32 / sizeof(long)];
		size_t counter;
	} block;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(block.rdseed); ++i) {
		if (!arch_get_random_seed_long(&block.rdseed[i]) &&
		    !arch_get_random_long(&block.rdseed[i]))
			block.rdseed[i] = random_get_entropy();
	}

	spin_lock_irqsave(&input_pool.lock, flags);

	/* seed = HASHPRF(last_key, entropy_input) */
	blake2s_final(&input_pool.hash, seed);

	/* next_key = HASHPRF(seed, RDSEED || 0) */
	block.counter = 0;
	blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
	blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));

	spin_unlock_irqrestore(&input_pool.lock, flags);
	memzero_explicit(next_key, sizeof(next_key));

	while (nbytes) {
		i = min_t(size_t, nbytes, BLAKE2S_HASH_SIZE);
		/* output = HASHPRF(seed, RDSEED || ++counter) */
		++block.counter;
		blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
		nbytes -= i;
		buf += i;
	}

	memzero_explicit(seed, sizeof(seed));
	memzero_explicit(&block, sizeof(block));
}

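/*
 * In other words, a sketch of the construction above (|| denotes
 * concatenation):
 *
 *	seed     = BLAKE2s(key = last_key, pool contents)
 *	next_key = BLAKE2s(key = seed, RDSEED || 0)
 *	output_n = BLAKE2s(key = seed, RDSEED || n)	for n = 1, 2, ...
 *
 * Each extraction re-keys the pool and never reuses a (key, input)
 * pair, mirroring HKDF's extract-then-expand split.
 */
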
/*
 * First we make sure we have POOL_MIN_BITS of entropy in the pool, and then we
 * set the entropy count to zero (but don't actually touch any data). Only then
 * can we extract a new key with extract_entropy().
 */
static bool drain_entropy(void *buf, size_t nbytes)
{
	unsigned int entropy_count;
	do {
		entropy_count = READ_ONCE(input_pool.entropy_count);
		if (entropy_count < POOL_MIN_BITS)
			return false;
	} while (cmpxchg(&input_pool.entropy_count, entropy_count, 0) != entropy_count);
	extract_entropy(buf, nbytes);
	wake_up_interruptible(&random_write_wait);
	kill_fasync(&fasync, SIGIO, POLL_OUT);
	return true;
}

/*
 * Each time the timer fires, we expect that we got an unpredictable
 * jump in the cycle counter. Even if the timer is running on another
 * CPU, the timer activity will be touching the stack of the CPU that is
 * generating entropy.
 *
 * Note that we don't re-arm the timer in the timer itself - we are
 * happy to be scheduled away, since that just makes the load more
 * complex, but we do not want the timer to keep ticking unless the
 * entropy loop is running.
 *
 * So the re-arming always happens in the entropy loop itself.
 */
static void entropy_timer(struct timer_list *t)
{
	credit_entropy_bits(1);
}

/*
 * If we have an actual cycle counter, see if we can
 * generate enough entropy with timing noise
 */
static void try_to_generate_entropy(void)
{
	struct {
		unsigned long now;
		struct timer_list timer;
	} stack;

	stack.now = random_get_entropy();

	/* Slow counter - or none. Don't even bother */
	if (stack.now == random_get_entropy())
		return;

	timer_setup_on_stack(&stack.timer, entropy_timer, 0);
	while (!crng_ready()) {
		if (!timer_pending(&stack.timer))
			mod_timer(&stack.timer, jiffies + 1);
		mix_pool_bytes(&stack.now, sizeof(stack.now));
		schedule();
		stack.now = random_get_entropy();
	}

	del_timer_sync(&stack.timer);
	destroy_timer_on_stack(&stack.timer);
	mix_pool_bytes(&stack.now, sizeof(stack.now));
}

static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
static int __init parse_trust_cpu(char *arg)
{
	return kstrtobool(arg, &trust_cpu);
}
early_param("random.trust_cpu", parse_trust_cpu);

/*
 * Note that setup_arch() may call add_device_randomness()
 * long before we get here. This allows seeding of the pools
 * with some platform dependent data very early in the boot
 * process. But it limits our options here. We must use
 * statically allocated structures that already have all
 * initializations complete at compile time. We should also
 * take care not to overwrite the precious per platform data
 * we were given.
 */
int __init rand_initialize(void)
{
	size_t i;
	ktime_t now = ktime_get_real();
	bool arch_init = true;
	unsigned long rv;

	for (i = 0; i < BLAKE2S_BLOCK_SIZE; i += sizeof(rv)) {
		if (!arch_get_random_seed_long_early(&rv) &&
		    !arch_get_random_long_early(&rv)) {
			rv = random_get_entropy();
			arch_init = false;
		}
		mix_pool_bytes(&rv, sizeof(rv));
	}
	mix_pool_bytes(&now, sizeof(now));
	mix_pool_bytes(utsname(), sizeof(*(utsname())));

	extract_entropy(base_crng.key, sizeof(base_crng.key));
	++base_crng.generation;

	if (arch_init && trust_cpu && crng_init < 2) {
		crng_init = 2;
		pr_notice("crng init done (trusting CPU's manufacturer)\n");
	}

	if (ratelimit_disable) {
		urandom_warning.interval = 0;
		unseeded_warning.interval = 0;
	}
	return 0;
}

#ifdef CONFIG_BLOCK
void rand_initialize_disk(struct gendisk *disk)
{
	struct timer_rand_state *state;

	/*
	 * If kzalloc returns null, we just won't use that entropy
	 * source.
	 */
	state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
	if (state) {
		state->last_time = INITIAL_JIFFIES;
		disk->random = state;
	}
}
#endif

static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes,
			    loff_t *ppos)
{
	static int maxwarn = 10;

	if (!crng_ready() && maxwarn > 0) {
		maxwarn--;
		if (__ratelimit(&urandom_warning))
			pr_notice("%s: uninitialized urandom read (%zd bytes read)\n",
				  current->comm, nbytes);
	}

	return get_random_bytes_user(buf, nbytes);
}

static ssize_t random_read(struct file *file, char __user *buf, size_t nbytes,
			   loff_t *ppos)
{
	int ret;

	ret = wait_for_random_bytes();
	if (ret != 0)
		return ret;
	return get_random_bytes_user(buf, nbytes);
}

static __poll_t random_poll(struct file *file, poll_table *wait)
{
	__poll_t mask;

	poll_wait(file, &crng_init_wait, wait);
	poll_wait(file, &random_write_wait, wait);
	mask = 0;
	if (crng_ready())
		mask |= EPOLLIN | EPOLLRDNORM;
	if (input_pool.entropy_count < POOL_MIN_BITS)
		mask |= EPOLLOUT | EPOLLWRNORM;
	return mask;
}

static int write_pool(const char __user *ubuf, size_t count)
{
	size_t len;
	int ret = 0;
	u8 block[BLAKE2S_BLOCK_SIZE];

	while (count) {
		len = min(count, sizeof(block));
		if (copy_from_user(block, ubuf, len)) {
			ret = -EFAULT;
			goto out;
		}
		count -= len;
		ubuf += len;
		mix_pool_bytes(block, len);
		cond_resched();
	}

out:
	memzero_explicit(block, sizeof(block));
	return ret;
}

static ssize_t random_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	int ret;

	ret = write_pool(buffer, count);
	if (ret)
		return ret;

	return (ssize_t)count;
}

static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int size, ent_count;
	int __user *p = (int __user *)arg;
	int retval;

	switch (cmd) {
	case RNDGETENTCNT:
		/* inherently racy, no point locking */
		if (put_user(input_pool.entropy_count, p))
			return -EFAULT;
		return 0;
	case RNDADDTOENTCNT:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		credit_entropy_bits(ent_count);
		return 0;
	case RNDADDENTROPY:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p++))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		if (get_user(size, p++))
			return -EFAULT;
		retval = write_pool((const char __user *)p, size);
		if (retval < 0)
			return retval;
		credit_entropy_bits(ent_count);
		return 0;
	case RNDZAPENTCNT:
	case RNDCLEARPOOL:
		/*
		 * Clear the entropy pool counters. We no longer clear
		 * the entropy pool, as that's silly.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (xchg(&input_pool.entropy_count, 0)) {
			wake_up_interruptible(&random_write_wait);
			kill_fasync(&fasync, SIGIO, POLL_OUT);
		}
		return 0;
	case RNDRESEEDCRNG:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (crng_init < 2)
			return -ENODATA;
		crng_reseed();
		return 0;
	default:
		return -EINVAL;
	}
}

static int random_fasync(int fd, struct file *filp, int on)
{
	return fasync_helper(fd, filp, on, &fasync);
}

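/*
 * A minimal userspace sketch of RNDADDENTROPY (illustrative only):
 * the argument is a struct rand_pool_info from <linux/random.h>, i.e.
 * the two ints read above followed by the payload that write_pool()
 * consumes.
 *
 *	struct rand_pool_info *info = malloc(sizeof(*info) + 32);
 *	info->entropy_count = 256;	// bits to credit
 *	info->buf_size = 32;		// payload bytes that follow
 *	// ... fill info->buf with 32 bytes from a trusted source ...
 *	ioctl(fd, RNDADDENTROPY, info);	// fd: /dev/random, CAP_SYS_ADMIN
 */
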
const struct file_operations random_fops = {
	.read = random_read,
	.write = random_write,
	.poll = random_poll,
	.unlocked_ioctl = random_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
};

const struct file_operations urandom_fops = {
	.read = urandom_read,
	.write = random_write,
	.unlocked_ioctl = random_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
};

SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int,
		flags)
{
	if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
		return -EINVAL;

	/*
	 * Requesting insecure and blocking randomness at the same time makes
	 * no sense.
	 */
	if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
		return -EINVAL;

	if (count > INT_MAX)
		count = INT_MAX;

	if (!(flags & GRND_INSECURE) && !crng_ready()) {
		int ret;

		if (flags & GRND_NONBLOCK)
			return -EAGAIN;
		ret = wait_for_random_bytes();
		if (unlikely(ret))
			return ret;
	}
	return get_random_bytes_user(buf, count);
}

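/*
 * A minimal userspace sketch (illustrative only): glibc >= 2.25
 * exposes this syscall as getrandom(2).
 *
 *	#include <sys/random.h>
 *
 *	unsigned char key[32];
 *	if (getrandom(key, sizeof(key), 0) != sizeof(key))
 *		abort();	// flags == 0 blocks until the crng is seeded
 */
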
/********************************************************************
 *
 * Sysctl interface
 *
 ********************************************************************/

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int random_min_urandom_seed = 60;
static int random_write_wakeup_bits = POOL_MIN_BITS;
static int sysctl_poolsize = POOL_BITS;
static char sysctl_bootid[16];

/*
 * This function is used to return both the bootid UUID, and random
 * UUID. The difference is in whether table->data is NULL; if it is,
 * then a new UUID is generated and returned to the user.
 *
 * If the user accesses this via the proc interface, the UUID will be
 * returned as an ASCII string in the standard UUID format; if via the
 * sysctl system call, as 16 bytes of binary data.
 */
static int proc_do_uuid(struct ctl_table *table, int write, void *buffer,
			size_t *lenp, loff_t *ppos)
{
	struct ctl_table fake_table;
	unsigned char buf[64], tmp_uuid[16], *uuid;

	uuid = table->data;
	if (!uuid) {
		uuid = tmp_uuid;
		generate_random_uuid(uuid);
	} else {
		static DEFINE_SPINLOCK(bootid_spinlock);

		spin_lock(&bootid_spinlock);
		if (!uuid[8])
			generate_random_uuid(uuid);
		spin_unlock(&bootid_spinlock);
	}

	sprintf(buf, "%pU", uuid);

	fake_table.data = buf;
	fake_table.maxlen = sizeof(buf);

	return proc_dostring(&fake_table, write, buffer, lenp, ppos);
}

static struct ctl_table random_table[] = {
	{
		.procname	= "poolsize",
		.data		= &sysctl_poolsize,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "entropy_avail",
		.data		= &input_pool.entropy_count,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "write_wakeup_threshold",
		.data		= &random_write_wakeup_bits,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "urandom_min_reseed_secs",
		.data		= &random_min_urandom_seed,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "boot_id",
		.data		= &sysctl_bootid,
		.maxlen		= 16,
		.mode		= 0444,
		.proc_handler	= proc_do_uuid,
	},
	{
		.procname	= "uuid",
		.maxlen		= 16,
		.mode		= 0444,
		.proc_handler	= proc_do_uuid,
	},
	{ }
};

/*
 * rand_initialize() is called before sysctl_init(),
 * so we cannot call register_sysctl_init() in rand_initialize()
 */
static int __init random_sysctls_init(void)
{
	register_sysctl_init("kernel/random", random_table);
	return 0;
}
device_initcall(random_sysctls_init);
#endif	/* CONFIG_SYSCTL */

/*
 * Interface for in-kernel drivers of true hardware RNGs.
 * Those devices may produce endless random bits and will be throttled
 * when our pool is full.
 */
void add_hwgenerator_randomness(const void *buffer, size_t count,
				size_t entropy)
{
	if (unlikely(crng_init == 0)) {
		size_t ret = crng_fast_load(buffer, count);
		mix_pool_bytes(buffer, ret);
		count -= ret;
		buffer += ret;
		if (!count || crng_init == 0)
			return;
	}

	/*
	 * Throttle writing if we're above the trickle threshold.
	 * We'll be woken up again once below POOL_MIN_BITS, when
	 * the calling thread is about to terminate, or once
	 * CRNG_RESEED_INTERVAL has elapsed.
	 */
	wait_event_interruptible_timeout(random_write_wait,
			!system_wq || kthread_should_stop() ||
			input_pool.entropy_count < POOL_MIN_BITS,
			CRNG_RESEED_INTERVAL);
	mix_pool_bytes(buffer, count);
	credit_entropy_bits(entropy);
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);

/*
 * Handle a random seed passed in by the bootloader. If the seed is
 * trustworthy, it is credited as if it came from a hardware RNG;
 * otherwise, it is treated as device data. The decision is controlled
 * by CONFIG_RANDOM_TRUST_BOOTLOADER.
 */
void add_bootloader_randomness(const void *buf, size_t size)
{
	if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER))
		add_hwgenerator_randomness(buf, size, size * 8);
	else
		add_device_randomness(buf, size);
}
EXPORT_SYMBOL_GPL(add_bootloader_randomness);
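
/*
 * A minimal driver-side sketch (illustrative only; my_hwrng_read() is
 * a hypothetical device helper): a hw_random backend thread feeding
 * the pool, conservatively crediting half a bit of entropy per byte.
 *
 *	u8 buf[64];
 *	size_t n = my_hwrng_read(dev, buf, sizeof(buf));
 *	add_hwgenerator_randomness(buf, n, n * 8 / 2);
 */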