random: use simpler fast key erasure flow on per-cpu keys
[linux-block.git] / drivers / char / random.c
/*
 * random.c -- A strong random number generator
 *
 * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 *
 * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
 *
 * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All
 * rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, and the entire permission notice in its entirety,
 *    including the disclaimer of warranties.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior
 *    written permission.
 *
 * ALTERNATIVELY, this product may be distributed under the terms of
 * the GNU General Public License, in which case the provisions of the GPL are
 * required INSTEAD OF the above restrictions.  (This clause is
 * necessary due to a potential bad interaction between the GPL and
 * the restrictions contained in a BSD-style copyright.)
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
 * WHICH ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * Exported interfaces ---- output
 * ===============================
 *
 * There are four exported interfaces; two for use within the kernel,
 * and two for use from userspace.
 *
 * Exported interfaces ---- userspace output
 * -----------------------------------------
 *
 * The userspace interfaces are two character devices /dev/random and
 * /dev/urandom.  /dev/random is suitable for use when very high
 * quality randomness is desired (for example, for key generation or
 * one-time pads), as it will only return a maximum of the number of
 * bits of randomness (as estimated by the random number generator)
 * contained in the entropy pool.
 *
 * The /dev/urandom device does not have this limit, and will return
 * as many bytes as are requested.  As more and more random bytes are
 * requested without giving time for the entropy pool to recharge,
 * this will result in random numbers that are merely cryptographically
 * strong.  For many applications, however, this is acceptable.
 *
 * Exported interfaces ---- kernel output
 * --------------------------------------
 *
 * The primary kernel interfaces are:
 *
 *	void get_random_bytes(void *buf, int nbytes);
 *	u32 get_random_u32()
 *	u64 get_random_u64()
 *	unsigned int get_random_int()
 *	unsigned long get_random_long()
 *
 * These interfaces will return the requested number of random bytes
 * into the given buffer or as a return value.  This is equivalent to a
 * read from /dev/urandom.  The get_random_{u32,u64,int,long}() family
 * of functions may be higher performance for one-off random integers,
 * because they do a bit of buffering.  (See the illustrative sketch
 * following this comment block.)
 *
 * prandom_u32()
 * -------------
 *
 * For even weaker applications, see the pseudorandom generator
 * prandom_u32(), prandom_max(), and prandom_bytes().  If the random
 * numbers aren't security-critical at all, these are *far* cheaper.
 * Useful for self-tests, random error simulation, randomized backoffs,
 * and any other application where you trust that nobody is trying to
 * maliciously mess with you by guessing the "random" numbers.
 *
 * Exported interfaces ---- input
 * ==============================
 *
 * The current exported interfaces for gathering environmental noise
 * from the devices are:
 *
 *	void add_device_randomness(const void *buf, unsigned int size);
 *	void add_input_randomness(unsigned int type, unsigned int code,
 *				  unsigned int value);
 *	void add_interrupt_randomness(int irq);
 *	void add_disk_randomness(struct gendisk *disk);
 *	void add_hwgenerator_randomness(const char *buffer, size_t count,
 *					size_t entropy);
 *	void add_bootloader_randomness(const void *buf, unsigned int size);
 *
 * add_device_randomness() is for adding data to the random pool that
 * is likely to differ between two devices (or possibly even per boot).
 * This would be things like MAC addresses or serial numbers, or the
 * read-out of the RTC.  This does *not* add any actual entropy to the
 * pool, but it initializes the pool to different values for devices
 * that might otherwise be identical and have very little entropy
 * available to them (particularly common in the embedded world).
 *
 * add_input_randomness() uses the input layer interrupt timing, as well
 * as the event type information from the hardware.
 *
 * add_interrupt_randomness() uses the interrupt timing as random
 * inputs to the entropy pool.  Using the cycle counters and the irq source
 * as inputs, it feeds the randomness roughly once a second.
 *
 * add_disk_randomness() uses what amounts to the seek time of block
 * layer request events, on a per-disk_devt basis, as input to the
 * entropy pool.  Note that high-speed solid state drives with very low
 * seek times do not make for good sources of entropy, as their seek
 * times are usually fairly consistent.
 *
 * All of these routines try to estimate how many bits of randomness a
 * particular randomness source contributes.  They do this by keeping
 * track of the first and second order deltas of the event timings.
 *
 * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
 * entropy as specified by the caller.  If the entropy pool is full it will
 * block until more entropy is needed.
 *
 * add_bootloader_randomness() is the same as add_hwgenerator_randomness() or
 * add_device_randomness(), depending on whether or not the configuration
 * option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
 *
 * Ensuring unpredictability at system startup
 * ===========================================
 *
 * When any operating system starts up, it will go through a sequence
 * of actions that are fairly predictable by an adversary, especially
 * if the start-up does not involve interaction with a human operator.
 * This reduces the actual number of bits of unpredictability in the
 * entropy pool below the value in entropy_count.  In order to
 * counteract this effect, it helps to carry information in the
 * entropy pool across shut-downs and start-ups.  To do this, put the
 * following lines in an appropriate script which is run during the boot
 * sequence:
 *
 *	echo "Initializing random number generator..."
 *	random_seed=/var/run/random-seed
 *	# Carry a random seed from start-up to start-up
 *	# Load and then save the whole entropy pool
 *	if [ -f $random_seed ]; then
 *		cat $random_seed >/dev/urandom
 *	else
 *		touch $random_seed
 *	fi
 *	chmod 600 $random_seed
 *	dd if=/dev/urandom of=$random_seed count=1 bs=512
 *
 * and the following lines in an appropriate script which is run as
 * the system is shutdown:
 *
 *	# Carry a random seed from shut-down to start-up
 *	# Save the whole entropy pool
 *	echo "Saving random seed..."
 *	random_seed=/var/run/random-seed
 *	touch $random_seed
 *	chmod 600 $random_seed
 *	dd if=/dev/urandom of=$random_seed count=1 bs=512
 *
 * For example, on most modern systems using the System V init
 * scripts, such code fragments would be found in
 * /etc/rc.d/init.d/random.  On older Linux systems, the correct script
 * location might be in /etc/rc.d/rc.local or /etc/rc.d/rc.0.
 *
 * Effectively, these commands cause the contents of the entropy pool
 * to be saved at shut-down time and reloaded into the entropy pool at
 * start-up.  (The 'dd' in the addition to the bootup script is to
 * make sure that $random_seed is different for every start-up,
 * even if the system crashes without executing rc.0.)  Even with
 * complete knowledge of the start-up activities, predicting the state
 * of the entropy pool requires knowledge of the previous history of
 * the system.
 *
 * Configuring the /dev/random driver under Linux
 * ==============================================
 *
 * The /dev/random driver under Linux uses minor numbers 8 and 9 of
 * the /dev/mem major number (#1).  So if your system does not have
 * /dev/random and /dev/urandom created already, they can be created
 * by using the commands:
 *
 *	mknod /dev/random c 1 8
 *	mknod /dev/urandom c 1 9
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
#include <crypto/chacha.h>
#include <crypto/blake2s.h>

#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/random.h>

/* #define ADD_INTERRUPT_BENCH */

enum {
	POOL_BITS = BLAKE2S_HASH_SIZE * 8,
	POOL_MIN_BITS = POOL_BITS /* No point in settling for less. */
};

/*
 * Static global variables
 */
static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
static struct fasync_struct *fasync;

static DEFINE_SPINLOCK(random_ready_list_lock);
static LIST_HEAD(random_ready_list);

/*
 * crng_init = 0 --> Uninitialized
 *	       1 --> Initialized
 *	       2 --> Initialized from input_pool
 *
 * crng_init is protected by base_crng.lock, and only increases
 * its value (from 0->1->2).
 */
static int crng_init = 0;
#define crng_ready() (likely(crng_init > 1))
static int crng_init_cnt = 0;
static void process_random_ready_list(void);
static void _get_random_bytes(void *buf, int nbytes);

static struct ratelimit_state unseeded_warning =
	RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
static struct ratelimit_state urandom_warning =
	RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);

static int ratelimit_disable __read_mostly;

module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");

/**********************************************************************
 *
 * OS independent entropy store.  Here are the functions which handle
 * storing entropy in an entropy pool.
 *
 **********************************************************************/

static struct {
	struct blake2s_state hash;
	spinlock_t lock;
	int entropy_count;
} input_pool = {
	.hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
		    BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4,
		    BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 },
	.hash.outlen = BLAKE2S_HASH_SIZE,
	.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
};

static void extract_entropy(void *buf, size_t nbytes);

static void crng_reseed(void);

/*
 * This function adds bytes into the entropy "pool".  It does not
 * update the entropy estimate.  The caller should call
 * credit_entropy_bits if this is appropriate.
 */
static void _mix_pool_bytes(const void *in, int nbytes)
{
	blake2s_update(&input_pool.hash, in, nbytes);
}

static void __mix_pool_bytes(const void *in, int nbytes)
{
	trace_mix_pool_bytes_nolock(nbytes, _RET_IP_);
	_mix_pool_bytes(in, nbytes);
}

static void mix_pool_bytes(const void *in, int nbytes)
{
	unsigned long flags;

	trace_mix_pool_bytes(nbytes, _RET_IP_);
	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(in, nbytes);
	spin_unlock_irqrestore(&input_pool.lock, flags);
}

struct fast_pool {
	u32 pool[4];
	unsigned long last;
	u16 reg_idx;
	u8 count;
};

/*
 * This is a fast mixing routine used by the interrupt randomness
 * collector.  It's hardcoded for a 128-bit pool and assumes that any
 * locks that might be needed are taken by the caller.
 */
static void fast_mix(struct fast_pool *f)
{
	u32 a = f->pool[0],	b = f->pool[1];
	u32 c = f->pool[2],	d = f->pool[3];

	a += b;			c += d;
	b = rol32(b, 6);	d = rol32(d, 27);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 16);	d = rol32(d, 14);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 6);	d = rol32(d, 27);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 16);	d = rol32(d, 14);
	d ^= a;			b ^= c;

	f->pool[0] = a;  f->pool[1] = b;
	f->pool[2] = c;  f->pool[3] = d;
	f->count++;
}

static void process_random_ready_list(void)
{
	unsigned long flags;
	struct random_ready_callback *rdy, *tmp;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
		struct module *owner = rdy->owner;

		list_del_init(&rdy->list);
		rdy->func(rdy);
		module_put(owner);
	}
	spin_unlock_irqrestore(&random_ready_list_lock, flags);
}

static void credit_entropy_bits(int nbits)
{
	int entropy_count, orig;

	if (nbits <= 0)
		return;

	nbits = min(nbits, POOL_BITS);

	do {
		orig = READ_ONCE(input_pool.entropy_count);
		entropy_count = min(POOL_BITS, orig + nbits);
	} while (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig);

	trace_credit_entropy_bits(nbits, entropy_count, _RET_IP_);

	if (crng_init < 2 && entropy_count >= POOL_MIN_BITS)
		crng_reseed();
}

/*********************************************************************
 *
 * CRNG using CHACHA20
 *
 *********************************************************************/

enum {
	CRNG_RESEED_INTERVAL = 300 * HZ,
	CRNG_INIT_CNT_THRESH = 2 * CHACHA_KEY_SIZE
};

static struct {
	u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long));
	unsigned long birth;
	unsigned long generation;
	spinlock_t lock;
} base_crng = {
	.lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
};

struct crng {
	u8 key[CHACHA_KEY_SIZE];
	unsigned long generation;
	local_lock_t lock;
};

static DEFINE_PER_CPU(struct crng, crngs) = {
	.generation = ULONG_MAX,
	.lock = INIT_LOCAL_LOCK(crngs.lock),
};

static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);

static void invalidate_batched_entropy(void);

/*
 * crng_fast_load() can be called by code in the interrupt service
 * path.  So we can't afford to dilly-dally.  Returns the number of
 * bytes processed from cp.
 */
static size_t crng_fast_load(const u8 *cp, size_t len)
{
	unsigned long flags;
	u8 *p;
	size_t ret = 0;

	if (!spin_trylock_irqsave(&base_crng.lock, flags))
		return 0;
	if (crng_init != 0) {
		spin_unlock_irqrestore(&base_crng.lock, flags);
		return 0;
	}
	p = base_crng.key;
	while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
		p[crng_init_cnt % sizeof(base_crng.key)] ^= *cp;
		cp++; crng_init_cnt++; len--; ret++;
	}
	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
		invalidate_batched_entropy();
		crng_init = 1;
	}
	spin_unlock_irqrestore(&base_crng.lock, flags);
	if (crng_init == 1)
		pr_notice("fast init done\n");
	return ret;
}

/*
 * crng_slow_load() is called by add_device_randomness, which has two
 * attributes.  (1) We can't trust that the buffer passed to it is
 * guaranteed to be unpredictable (so it might not have any entropy at
 * all), and (2) it doesn't have the performance constraints of
 * crng_fast_load().
 *
 * So we do something more comprehensive which is guaranteed to touch
 * all of the base_crng's key, and which uses an LFSR with a
 * period of 255 as part of the mixing algorithm.  Finally, we do
 * *not* advance crng_init_cnt, since the buffer we get may be something
 * like a fixed DMI table (for example), which might very well be
 * unique to the machine, but is otherwise unvarying.
 */
static int crng_slow_load(const u8 *cp, size_t len)
{
	unsigned long flags;
	static u8 lfsr = 1;
	u8 tmp;
	unsigned int i, max = sizeof(base_crng.key);
	const u8 *src_buf = cp;
	u8 *dest_buf = base_crng.key;

	if (!spin_trylock_irqsave(&base_crng.lock, flags))
		return 0;
	if (crng_init != 0) {
		spin_unlock_irqrestore(&base_crng.lock, flags);
		return 0;
	}
	if (len > max)
		max = len;

	for (i = 0; i < max; i++) {
		tmp = lfsr;
		lfsr >>= 1;
		if (tmp & 1)
			lfsr ^= 0xE1;
		tmp = dest_buf[i % sizeof(base_crng.key)];
		dest_buf[i % sizeof(base_crng.key)] ^= src_buf[i % len] ^ lfsr;
		lfsr += (tmp << 3) | (tmp >> 5);
	}
	spin_unlock_irqrestore(&base_crng.lock, flags);
	return 1;
}

static void crng_reseed(void)
{
	unsigned long flags;
	int entropy_count;
	unsigned long next_gen;
	u8 key[CHACHA_KEY_SIZE];

	/*
	 * First we make sure we have POOL_MIN_BITS of entropy in the pool,
	 * and then we drain all of it.  Only then can we extract a new key.
	 */
	do {
		entropy_count = READ_ONCE(input_pool.entropy_count);
		if (entropy_count < POOL_MIN_BITS)
			return;
	} while (cmpxchg(&input_pool.entropy_count, entropy_count, 0) != entropy_count);
	extract_entropy(key, sizeof(key));
	wake_up_interruptible(&random_write_wait);
	kill_fasync(&fasync, SIGIO, POLL_OUT);

	/*
	 * We copy the new key into the base_crng, overwriting the old one,
	 * and update the generation counter.  We avoid hitting ULONG_MAX,
	 * because the per-cpu crngs are initialized to ULONG_MAX, so this
	 * forces new CPUs that come online to always initialize.
	 */
	spin_lock_irqsave(&base_crng.lock, flags);
	memcpy(base_crng.key, key, sizeof(base_crng.key));
	next_gen = base_crng.generation + 1;
	if (next_gen == ULONG_MAX)
		++next_gen;
	WRITE_ONCE(base_crng.generation, next_gen);
	WRITE_ONCE(base_crng.birth, jiffies);
	spin_unlock_irqrestore(&base_crng.lock, flags);
	memzero_explicit(key, sizeof(key));

	if (crng_init < 2) {
		invalidate_batched_entropy();
		crng_init = 2;
		process_random_ready_list();
		wake_up_interruptible(&crng_init_wait);
		kill_fasync(&fasync, SIGIO, POLL_IN);
		pr_notice("crng init done\n");
		if (unseeded_warning.missed) {
			pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
				  unseeded_warning.missed);
			unseeded_warning.missed = 0;
		}
		if (urandom_warning.missed) {
			pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
				  urandom_warning.missed);
			urandom_warning.missed = 0;
		}
	}
}

/*
 * The general form here is based on a "fast key erasure RNG" from
 * <https://blog.cr.yp.to/20170723-random.html>.  It generates a ChaCha
 * block using the provided key, and then immediately overwrites that
 * key with half the block.  It returns the resultant ChaCha state to the
 * user, along with the second half of the block containing 32 bytes of
 * random data that may be used; random_data_len may not be greater than
 * 32.
 */
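/*
 * For reference, the 16-word ChaCha state assembled below is laid out as
 * follows (a sketch of the standard layout):
 *
 *	words  0..3	"expand 32-byte k" constants
 *	words  4..11	256-bit key
 *	word  12	block counter
 *	words 13..15	nonce (zeroed here; the counter tracks stream position)
 *
 * Overwriting the key with the first 32 output bytes is what makes the
 * erasure "fast": once a block is produced, the key that made it is gone.
 */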
static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
				  u32 chacha_state[CHACHA_STATE_WORDS],
				  u8 *random_data, size_t random_data_len)
{
	u8 first_block[CHACHA_BLOCK_SIZE];

	BUG_ON(random_data_len > 32);

	chacha_init_consts(chacha_state);
	memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE);
	memset(&chacha_state[12], 0, sizeof(u32) * 4);
	chacha20_block(chacha_state, first_block);

	memcpy(key, first_block, CHACHA_KEY_SIZE);
	memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
	memzero_explicit(first_block, sizeof(first_block));
}

/*
 * This function returns a ChaCha state that you may use for generating
 * random data.  It also returns up to 32 bytes on its own of random data
 * that may be used; random_data_len may not be greater than 32.
 */
static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
			    u8 *random_data, size_t random_data_len)
{
	unsigned long flags;
	struct crng *crng;

	BUG_ON(random_data_len > 32);

	/*
	 * For the fast path, we first check whether we're ready while
	 * unlocked, and then re-check once locked.  In the case where we're
	 * really not ready, we do fast key erasure with the base_crng
	 * directly, because this is what crng_{fast,slow}_load mutate
	 * during early init.
	 */
	if (unlikely(!crng_ready())) {
		bool ready;

		spin_lock_irqsave(&base_crng.lock, flags);
		ready = crng_ready();
		if (!ready)
			crng_fast_key_erasure(base_crng.key, chacha_state,
					      random_data, random_data_len);
		spin_unlock_irqrestore(&base_crng.lock, flags);
		if (!ready)
			return;
	}

	/*
	 * If the base_crng is more than 5 minutes old, we reseed, which
	 * in turn bumps the generation counter that we check below.
	 */
	if (unlikely(time_after(jiffies, READ_ONCE(base_crng.birth) + CRNG_RESEED_INTERVAL)))
		crng_reseed();

	local_lock_irqsave(&crngs.lock, flags);
	crng = raw_cpu_ptr(&crngs);

	/*
	 * If our per-cpu crng is older than the base_crng, then it means
	 * somebody reseeded the base_crng.  In that case, we do fast key
	 * erasure on the base_crng, and use its output as the new key
	 * for our per-cpu crng.  This brings us up to date with base_crng.
	 */
	if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) {
		spin_lock(&base_crng.lock);
		crng_fast_key_erasure(base_crng.key, chacha_state,
				      crng->key, sizeof(crng->key));
		crng->generation = base_crng.generation;
		spin_unlock(&base_crng.lock);
	}

	/*
	 * Finally, when we've made it this far, our per-cpu crng has an
	 * up-to-date key, and we can do fast key erasure with it to produce
	 * some random data and a ChaCha state for the caller.  All other
	 * branches of this function are "unlikely", so most of the time we
	 * should wind up here immediately.
	 */
	crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len);
	local_unlock_irqrestore(&crngs.lock, flags);
}

static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
{
	bool large_request = nbytes > 256;
	ssize_t ret = 0, len;
	u32 chacha_state[CHACHA_STATE_WORDS];
	u8 output[CHACHA_BLOCK_SIZE];

	if (!nbytes)
		return 0;

	len = min_t(ssize_t, 32, nbytes);
	crng_make_state(chacha_state, output, len);

	if (copy_to_user(buf, output, len))
		return -EFAULT;
	nbytes -= len;
	buf += len;
	ret += len;

	while (nbytes) {
		if (large_request && need_resched()) {
			if (signal_pending(current))
				break;
			schedule();
		}

		chacha20_block(chacha_state, output);
		if (unlikely(chacha_state[12] == 0))
			++chacha_state[13];

		len = min_t(ssize_t, nbytes, CHACHA_BLOCK_SIZE);
		if (copy_to_user(buf, output, len)) {
			ret = -EFAULT;
			break;
		}

		nbytes -= len;
		buf += len;
		ret += len;
	}

	memzero_explicit(chacha_state, sizeof(chacha_state));
	memzero_explicit(output, sizeof(output));
	return ret;
}

/*********************************************************************
 *
 * Entropy input management
 *
 *********************************************************************/

/* There is one of these per entropy source */
struct timer_rand_state {
	cycles_t last_time;
	long last_delta, last_delta2;
};

#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, };

/*
 * Add device- or boot-specific data to the input pool to help
 * initialize it.
 *
 * None of this adds any entropy; it is meant to avoid the problem of
 * the entropy pool having similar initial state across largely
 * identical devices.
 */
void add_device_randomness(const void *buf, unsigned int size)
{
	unsigned long time = random_get_entropy() ^ jiffies;
	unsigned long flags;

	if (!crng_ready() && size)
		crng_slow_load(buf, size);

	trace_add_device_randomness(size, _RET_IP_);
	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(buf, size);
	_mix_pool_bytes(&time, sizeof(time));
	spin_unlock_irqrestore(&input_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);
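
/*
 * Illustrative sketch, not part of this driver: a network driver might
 * personalize the pool with its (device-unique, but not secret) MAC
 * address at probe time. Names here are hypothetical; no entropy is
 * credited by this call.
 */
#if 0 /* example only */
	add_device_randomness(netdev->dev_addr, ETH_ALEN);
#endif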

static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;

/*
 * This function adds entropy to the entropy "pool" by using timing
 * delays.  It uses the timer_rand_state structure to make an estimate
 * of how many bits of entropy this call has added to the pool.
 *
 * The number "num" is also added to the pool - it should somehow describe
 * the type of event which just happened.  This is currently 0-255 for
 * keyboard scan codes, and 256 upwards for interrupts.
 */
static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
{
	struct {
		long jiffies;
		unsigned int cycles;
		unsigned int num;
	} sample;
	long delta, delta2, delta3;

	sample.jiffies = jiffies;
	sample.cycles = random_get_entropy();
	sample.num = num;
	mix_pool_bytes(&sample, sizeof(sample));

	/*
	 * Calculate number of bits of randomness we probably added.
	 * We take into account the first, second and third-order deltas
	 * in order to make our estimate.
	 */
	delta = sample.jiffies - READ_ONCE(state->last_time);
	WRITE_ONCE(state->last_time, sample.jiffies);

	delta2 = delta - READ_ONCE(state->last_delta);
	WRITE_ONCE(state->last_delta, delta);

	delta3 = delta2 - READ_ONCE(state->last_delta2);
	WRITE_ONCE(state->last_delta2, delta2);

	if (delta < 0)
		delta = -delta;
	if (delta2 < 0)
		delta2 = -delta2;
	if (delta3 < 0)
		delta3 = -delta3;
	if (delta > delta2)
		delta = delta2;
	if (delta > delta3)
		delta = delta3;

	/*
	 * delta is now minimum absolute delta.
	 * Round down by 1 bit on general principles,
	 * and limit entropy estimate to 11 bits.
	 */
	credit_entropy_bits(min_t(int, fls(delta >> 1), 11));
}
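
/*
 * Worked example with illustrative numbers: if the absolute first-,
 * second- and third-order deltas come out as 100, 60 and 80, the minimum
 * is 60, and min_t(int, fls(60 >> 1), 11) = fls(30) = 5, so the call
 * above credits 5 bits of entropy to the pool.
 */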

void add_input_randomness(unsigned int type, unsigned int code,
			  unsigned int value)
{
	static unsigned char last_value;

	/* ignore autorepeat and the like */
	if (value == last_value)
		return;

	last_value = value;
	add_timer_randomness(&input_timer_state,
			     (type << 4) ^ code ^ (code >> 4) ^ value);
	trace_add_input_randomness(input_pool.entropy_count);
}
EXPORT_SYMBOL_GPL(add_input_randomness);

static DEFINE_PER_CPU(struct fast_pool, irq_randomness);

#ifdef ADD_INTERRUPT_BENCH
static unsigned long avg_cycles, avg_deviation;

#define AVG_SHIFT 8 /* Exponential average factor k=1/256 */
#define FIXED_1_2 (1 << (AVG_SHIFT - 1))

static void add_interrupt_bench(cycles_t start)
{
	long delta = random_get_entropy() - start;

	/* Use a weighted moving average */
	delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT);
	avg_cycles += delta;
	/* And average deviation */
	delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT);
	avg_deviation += delta;
}
#else
#define add_interrupt_bench(x)
#endif

static u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
{
	u32 *ptr = (u32 *)regs;
	unsigned int idx;

	if (regs == NULL)
		return 0;
	idx = READ_ONCE(f->reg_idx);
	if (idx >= sizeof(struct pt_regs) / sizeof(u32))
		idx = 0;
	ptr += idx++;
	WRITE_ONCE(f->reg_idx, idx);
	return *ptr;
}

void add_interrupt_randomness(int irq)
{
	struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
	struct pt_regs *regs = get_irq_regs();
	unsigned long now = jiffies;
	cycles_t cycles = random_get_entropy();
	u32 c_high, j_high;
	u64 ip;

	if (cycles == 0)
		cycles = get_reg(fast_pool, regs);
	c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
	j_high = (sizeof(now) > 4) ? now >> 32 : 0;
	fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
	fast_pool->pool[1] ^= now ^ c_high;
	ip = regs ? instruction_pointer(regs) : _RET_IP_;
	fast_pool->pool[2] ^= ip;
	fast_pool->pool[3] ^=
		(sizeof(ip) > 4) ? ip >> 32 : get_reg(fast_pool, regs);

	fast_mix(fast_pool);
	add_interrupt_bench(cycles);

	if (unlikely(crng_init == 0)) {
		if ((fast_pool->count >= 64) &&
		    crng_fast_load((u8 *)fast_pool->pool, sizeof(fast_pool->pool)) > 0) {
			fast_pool->count = 0;
			fast_pool->last = now;
			if (spin_trylock(&input_pool.lock)) {
				_mix_pool_bytes(&fast_pool->pool, sizeof(fast_pool->pool));
				spin_unlock(&input_pool.lock);
			}
		}
		return;
	}

	if ((fast_pool->count < 64) && !time_after(now, fast_pool->last + HZ))
		return;

	if (!spin_trylock(&input_pool.lock))
		return;

	fast_pool->last = now;
	__mix_pool_bytes(&fast_pool->pool, sizeof(fast_pool->pool));
	spin_unlock(&input_pool.lock);

	fast_pool->count = 0;

	/* award one bit for the contents of the fast pool */
	credit_entropy_bits(1);
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);

#ifdef CONFIG_BLOCK
void add_disk_randomness(struct gendisk *disk)
{
	if (!disk || !disk->random)
		return;
	/* first major is 1, so we get >= 0x200 here */
	add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
	trace_add_disk_randomness(disk_devt(disk), input_pool.entropy_count);
}
EXPORT_SYMBOL_GPL(add_disk_randomness);
#endif

/*********************************************************************
 *
 * Entropy extraction routines
 *
 *********************************************************************/

/*
 * This is an HKDF-like construction for using the hashed collected entropy
 * as a PRF key, which is then expanded block-by-block.
 */
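/*
 * Schematically, with BLAKE2s as the PRF (a sketch in HKDF-like terms):
 *
 *	seed     = HASH(pool state, i.e. all entropy mixed in so far)
 *	next_key = PRF(seed, RDSEED || 0)	(re-keys the pool hash)
 *	output_i = PRF(seed, RDSEED || i)	(expanded output, i >= 1)
 */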
static void extract_entropy(void *buf, size_t nbytes)
{
	unsigned long flags;
	u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
	struct {
		unsigned long rdseed[32 / sizeof(long)];
		size_t counter;
	} block;
	size_t i;

	trace_extract_entropy(nbytes, input_pool.entropy_count);

	for (i = 0; i < ARRAY_SIZE(block.rdseed); ++i) {
		if (!arch_get_random_seed_long(&block.rdseed[i]) &&
		    !arch_get_random_long(&block.rdseed[i]))
			block.rdseed[i] = random_get_entropy();
	}

	spin_lock_irqsave(&input_pool.lock, flags);

	/* seed = HASHPRF(last_key, entropy_input) */
	blake2s_final(&input_pool.hash, seed);

	/* next_key = HASHPRF(seed, RDSEED || 0) */
	block.counter = 0;
	blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
	blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));

	spin_unlock_irqrestore(&input_pool.lock, flags);
	memzero_explicit(next_key, sizeof(next_key));

	while (nbytes) {
		i = min_t(size_t, nbytes, BLAKE2S_HASH_SIZE);
		/* output = HASHPRF(seed, RDSEED || ++counter) */
		++block.counter;
		blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
		nbytes -= i;
		buf += i;
	}

	memzero_explicit(seed, sizeof(seed));
	memzero_explicit(&block, sizeof(block));
}

#define warn_unseeded_randomness(previous) \
	_warn_unseeded_randomness(__func__, (void *)_RET_IP_, (previous))

static void _warn_unseeded_randomness(const char *func_name, void *caller, void **previous)
{
#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
	const bool print_once = false;
#else
	static bool print_once __read_mostly;
#endif

	if (print_once || crng_ready() ||
	    (previous && (caller == READ_ONCE(*previous))))
		return;
	WRITE_ONCE(*previous, caller);
#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
	print_once = true;
#endif
	if (__ratelimit(&unseeded_warning))
		printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n",
				func_name, caller, crng_init);
}

/*
 * This function is the exported kernel interface.  It returns some
 * number of good random numbers, suitable for key generation, seeding
 * TCP sequence numbers, etc.  It does not rely on the hardware random
 * number generator.  For random bytes direct from the hardware RNG
 * (when available), use get_random_bytes_arch().  In order to ensure
 * that the randomness provided by this function is okay, the function
 * wait_for_random_bytes() should be called and return 0 at least once
 * at any point prior.
 */
static void _get_random_bytes(void *buf, int nbytes)
{
	u32 chacha_state[CHACHA_STATE_WORDS];
	u8 tmp[CHACHA_BLOCK_SIZE];
	ssize_t len;

	trace_get_random_bytes(nbytes, _RET_IP_);

	if (!nbytes)
		return;

	len = min_t(ssize_t, 32, nbytes);
	crng_make_state(chacha_state, buf, len);
	nbytes -= len;
	buf += len;

	while (nbytes) {
		if (nbytes < CHACHA_BLOCK_SIZE) {
			chacha20_block(chacha_state, tmp);
			memcpy(buf, tmp, nbytes);
			memzero_explicit(tmp, sizeof(tmp));
			break;
		}

		chacha20_block(chacha_state, buf);
		if (unlikely(chacha_state[12] == 0))
			++chacha_state[13];
		nbytes -= CHACHA_BLOCK_SIZE;
		buf += CHACHA_BLOCK_SIZE;
	}

	memzero_explicit(chacha_state, sizeof(chacha_state));
}

void get_random_bytes(void *buf, int nbytes)
{
	static void *previous;

	warn_unseeded_randomness(&previous);
	_get_random_bytes(buf, nbytes);
}
EXPORT_SYMBOL(get_random_bytes);

/*
 * Each time the timer fires, we expect that we got an unpredictable
 * jump in the cycle counter.  Even if the timer is running on another
 * CPU, the timer activity will be touching the stack of the CPU that is
 * generating entropy.
 *
 * Note that we don't re-arm the timer in the timer itself - we are
 * happy to be scheduled away, since that just makes the load more
 * complex, but we do not want the timer to keep ticking unless the
 * entropy loop is running.
 *
 * So the re-arming always happens in the entropy loop itself.
 */
static void entropy_timer(struct timer_list *t)
{
	credit_entropy_bits(1);
}

/*
 * If we have an actual cycle counter, see if we can
 * generate enough entropy with timing noise.
 */
static void try_to_generate_entropy(void)
{
	struct {
		unsigned long now;
		struct timer_list timer;
	} stack;

	stack.now = random_get_entropy();

	/* Slow counter - or none. Don't even bother */
	if (stack.now == random_get_entropy())
		return;

	timer_setup_on_stack(&stack.timer, entropy_timer, 0);
	while (!crng_ready()) {
		if (!timer_pending(&stack.timer))
			mod_timer(&stack.timer, jiffies + 1);
		mix_pool_bytes(&stack.now, sizeof(stack.now));
		schedule();
		stack.now = random_get_entropy();
	}

	del_timer_sync(&stack.timer);
	destroy_timer_on_stack(&stack.timer);
	mix_pool_bytes(&stack.now, sizeof(stack.now));
}

/*
 * Wait for the urandom pool to be seeded and thus guaranteed to supply
 * cryptographically secure random numbers.  This applies to: the /dev/urandom
 * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
 * family of functions.  Using any of these functions without first calling
 * this function forfeits the guarantee of security.
 *
 * Returns: 0 if the urandom pool has been seeded.
 *          -ERESTARTSYS if the function was interrupted by a signal.
 */
int wait_for_random_bytes(void)
{
	if (likely(crng_ready()))
		return 0;

	do {
		int ret;
		ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
		if (ret)
			return ret > 0 ? 0 : ret;

		try_to_generate_entropy();
	} while (!crng_ready());

	return 0;
}
EXPORT_SYMBOL(wait_for_random_bytes);

/*
 * Returns whether or not the urandom pool has been seeded and thus guaranteed
 * to supply cryptographically secure random numbers.  This applies to: the
 * /dev/urandom device, the get_random_bytes function, and the
 * get_random_{u32,u64,int,long} family of functions.
 *
 * Returns: true if the urandom pool has been seeded.
 *          false if the urandom pool has not been seeded.
 */
bool rng_is_initialized(void)
{
	return crng_ready();
}
EXPORT_SYMBOL(rng_is_initialized);

/*
 * Add a callback function that will be invoked when the nonblocking
 * pool is initialised.
 *
 * returns: 0 if callback is successfully added
 *	    -EALREADY if pool is already initialised (callback not called)
 *	    -ENOENT if module for callback is not alive
 */
int add_random_ready_callback(struct random_ready_callback *rdy)
{
	struct module *owner;
	unsigned long flags;
	int err = -EALREADY;

	if (crng_ready())
		return err;

	owner = rdy->owner;
	if (!try_module_get(owner))
		return -ENOENT;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	if (crng_ready())
		goto out;

	owner = NULL;

	list_add(&rdy->list, &random_ready_list);
	err = 0;

out:
	spin_unlock_irqrestore(&random_ready_list_lock, flags);

	module_put(owner);

	return err;
}
EXPORT_SYMBOL(add_random_ready_callback);

/*
 * Delete a previously registered readiness callback function.
 */
void del_random_ready_callback(struct random_ready_callback *rdy)
{
	unsigned long flags;
	struct module *owner = NULL;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	if (!list_empty(&rdy->list)) {
		list_del_init(&rdy->list);
		owner = rdy->owner;
	}
	spin_unlock_irqrestore(&random_ready_list_lock, flags);

	module_put(owner);
}
EXPORT_SYMBOL(del_random_ready_callback);
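
/*
 * Illustrative sketch, not part of this driver: deferring work until the
 * CRNG is ready using the callback API above. Names are hypothetical.
 */
#if 0 /* example only */
static void my_rng_ready(struct random_ready_callback *rdy)
{
	/* From here on, get_random_bytes() returns seeded output. */
}

static struct random_ready_callback my_rdy = {
	.func = my_rng_ready,
	.owner = THIS_MODULE,
};

	err = add_random_ready_callback(&my_rdy);
	if (err == -EALREADY)
		my_rng_ready(&my_rdy);	/* already seeded; run inline */
	else if (err)
		return err;
#endif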

/*
 * This function will use the architecture-specific hardware random
 * number generator if it is available.  The arch-specific hw RNG will
 * almost certainly be faster than what we can do in software, but it
 * is impossible to verify that it is implemented securely (as
 * opposed, to, say, the AES encryption of a sequence number using a
 * key known by the NSA).  So it's useful if we need the speed, but
 * only if we're willing to trust the hardware manufacturer not to
 * have put in a back door.
 *
 * Return number of bytes filled in.
 */
int __must_check get_random_bytes_arch(void *buf, int nbytes)
{
	int left = nbytes;
	u8 *p = buf;

	trace_get_random_bytes_arch(left, _RET_IP_);
	while (left) {
		unsigned long v;
		int chunk = min_t(int, left, sizeof(unsigned long));

		if (!arch_get_random_long(&v))
			break;

		memcpy(p, &v, chunk);
		p += chunk;
		left -= chunk;
	}

	return nbytes - left;
}
EXPORT_SYMBOL(get_random_bytes_arch);

static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
static int __init parse_trust_cpu(char *arg)
{
	return kstrtobool(arg, &trust_cpu);
}
early_param("random.trust_cpu", parse_trust_cpu);

/*
 * Note that setup_arch() may call add_device_randomness()
 * long before we get here.  This allows seeding of the pools
 * with some platform dependent data very early in the boot
 * process.  But it limits our options here.  We must use
 * statically allocated structures that already have all
 * initializations complete at compile time.  We should also
 * take care not to overwrite the precious per platform data
 * we were given.
 */
int __init rand_initialize(void)
{
	int i;
	ktime_t now = ktime_get_real();
	bool arch_init = true;
	unsigned long rv;

	for (i = BLAKE2S_BLOCK_SIZE; i > 0; i -= sizeof(rv)) {
		if (!arch_get_random_seed_long_early(&rv) &&
		    !arch_get_random_long_early(&rv)) {
			rv = random_get_entropy();
			arch_init = false;
		}
		mix_pool_bytes(&rv, sizeof(rv));
	}
	mix_pool_bytes(&now, sizeof(now));
	mix_pool_bytes(utsname(), sizeof(*(utsname())));

	extract_entropy(base_crng.key, sizeof(base_crng.key));
	if (arch_init && trust_cpu && crng_init < 2) {
		invalidate_batched_entropy();
		crng_init = 2;
		pr_notice("crng init done (trusting CPU's manufacturer)\n");
	}

	if (ratelimit_disable) {
		urandom_warning.interval = 0;
		unseeded_warning.interval = 0;
	}
	return 0;
}

#ifdef CONFIG_BLOCK
void rand_initialize_disk(struct gendisk *disk)
{
	struct timer_rand_state *state;

	/*
	 * If kzalloc returns null, we just won't use that entropy
	 * source.
	 */
	state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
	if (state) {
		state->last_time = INITIAL_JIFFIES;
		disk->random = state;
	}
}
#endif

static ssize_t urandom_read_nowarn(struct file *file, char __user *buf,
				   size_t nbytes, loff_t *ppos)
{
	int ret;

	nbytes = min_t(size_t, nbytes, INT_MAX >> 6);
	ret = get_random_bytes_user(buf, nbytes);
	trace_urandom_read(8 * nbytes, 0, input_pool.entropy_count);
	return ret;
}

static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes,
			    loff_t *ppos)
{
	static int maxwarn = 10;

	if (!crng_ready() && maxwarn > 0) {
		maxwarn--;
		if (__ratelimit(&urandom_warning))
			pr_notice("%s: uninitialized urandom read (%zd bytes read)\n",
				  current->comm, nbytes);
	}

	return urandom_read_nowarn(file, buf, nbytes, ppos);
}

static ssize_t random_read(struct file *file, char __user *buf, size_t nbytes,
			   loff_t *ppos)
{
	int ret;

	ret = wait_for_random_bytes();
	if (ret != 0)
		return ret;
	return urandom_read_nowarn(file, buf, nbytes, ppos);
}

static __poll_t random_poll(struct file *file, poll_table *wait)
{
	__poll_t mask;

	poll_wait(file, &crng_init_wait, wait);
	poll_wait(file, &random_write_wait, wait);
	mask = 0;
	if (crng_ready())
		mask |= EPOLLIN | EPOLLRDNORM;
	if (input_pool.entropy_count < POOL_MIN_BITS)
		mask |= EPOLLOUT | EPOLLWRNORM;
	return mask;
}

static int write_pool(const char __user *buffer, size_t count)
{
	size_t bytes;
	u8 buf[BLAKE2S_BLOCK_SIZE];
	const char __user *p = buffer;

	while (count > 0) {
		bytes = min(count, sizeof(buf));
		if (copy_from_user(buf, p, bytes))
			return -EFAULT;
		count -= bytes;
		p += bytes;
		mix_pool_bytes(buf, bytes);
		cond_resched();
	}

	return 0;
}

static ssize_t random_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	size_t ret;

	ret = write_pool(buffer, count);
	if (ret)
		return ret;

	return (ssize_t)count;
}

static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int size, ent_count;
	int __user *p = (int __user *)arg;
	int retval;

	switch (cmd) {
	case RNDGETENTCNT:
		/* inherently racy, no point locking */
		if (put_user(input_pool.entropy_count, p))
			return -EFAULT;
		return 0;
	case RNDADDTOENTCNT:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		credit_entropy_bits(ent_count);
		return 0;
	case RNDADDENTROPY:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p++))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		if (get_user(size, p++))
			return -EFAULT;
		retval = write_pool((const char __user *)p, size);
		if (retval < 0)
			return retval;
		credit_entropy_bits(ent_count);
		return 0;
	case RNDZAPENTCNT:
	case RNDCLEARPOOL:
		/*
		 * Clear the entropy pool counters.  We no longer clear
		 * the entropy pool, as that's silly.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (xchg(&input_pool.entropy_count, 0)) {
			wake_up_interruptible(&random_write_wait);
			kill_fasync(&fasync, SIGIO, POLL_OUT);
		}
		return 0;
	case RNDRESEEDCRNG:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (crng_init < 2)
			return -ENODATA;
		crng_reseed();
		return 0;
	default:
		return -EINVAL;
	}
}
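
/*
 * Illustrative userspace sketch, not kernel code: crediting entropy from a
 * trusted source via the RNDADDENTROPY ioctl handled above. 'fd' is an open
 * descriptor for /dev/random; error handling is omitted.
 */
#if 0 /* example only */
	struct rand_pool_info *info = malloc(sizeof(*info) + 32);

	info->entropy_count = 256;	/* bits being credited */
	info->buf_size = 32;		/* bytes of data that follow */
	/* ...fill info->buf with 32 bytes from a trusted source... */
	ioctl(fd, RNDADDENTROPY, info);
#endif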

static int random_fasync(int fd, struct file *filp, int on)
{
	return fasync_helper(fd, filp, on, &fasync);
}

const struct file_operations random_fops = {
	.read = random_read,
	.write = random_write,
	.poll = random_poll,
	.unlocked_ioctl = random_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
};

const struct file_operations urandom_fops = {
	.read = urandom_read,
	.write = random_write,
	.unlocked_ioctl = random_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
};

SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int,
		flags)
{
	int ret;

	if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
		return -EINVAL;

	/*
	 * Requesting insecure and blocking randomness at the same time makes
	 * no sense.
	 */
	if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
		return -EINVAL;

	if (count > INT_MAX)
		count = INT_MAX;

	if (!(flags & GRND_INSECURE) && !crng_ready()) {
		if (flags & GRND_NONBLOCK)
			return -EAGAIN;
		ret = wait_for_random_bytes();
		if (unlikely(ret))
			return ret;
	}
	return urandom_read_nowarn(NULL, buf, count, NULL);
}
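
/*
 * Illustrative userspace counterpart, not kernel code: the typical way this
 * syscall is consumed, via the libc wrapper from <sys/random.h>.
 */
#if 0 /* example only */
	unsigned char key[32];

	/* Blocks until the CRNG is seeded, then fills the buffer. */
	if (getrandom(key, sizeof(key), 0) != sizeof(key))
		abort();
#endif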
1503
1da177e4
LT
1504/********************************************************************
1505 *
1506 * Sysctl interface
1507 *
1508 ********************************************************************/
1509
1510#ifdef CONFIG_SYSCTL
1511
1512#include <linux/sysctl.h>
1513
db61ffe3 1514static int random_min_urandom_seed = 60;
489c7fc4
JD
1515static int random_write_wakeup_bits = POOL_MIN_BITS;
1516static int sysctl_poolsize = POOL_BITS;
1da177e4
LT
1517static char sysctl_bootid[16];
1518
1519/*
f22052b2 1520 * This function is used to return both the bootid UUID, and random
1da177e4
LT
1521 * UUID. The difference is in whether table->data is NULL; if it is,
1522 * then a new UUID is generated and returned to the user.
1523 *
f22052b2
GP
1524 * If the user accesses this via the proc interface, the UUID will be
1525 * returned as an ASCII string in the standard UUID format; if via the
1526 * sysctl system call, as 16 bytes of binary data.
1da177e4 1527 */
248045b8
JD
1528static int proc_do_uuid(struct ctl_table *table, int write, void *buffer,
1529 size_t *lenp, loff_t *ppos)
1da177e4 1530{
a151427e 1531 struct ctl_table fake_table;
1da177e4
LT
1532 unsigned char buf[64], tmp_uuid[16], *uuid;
1533
1534 uuid = table->data;
1535 if (!uuid) {
1536 uuid = tmp_uuid;
1da177e4 1537 generate_random_uuid(uuid);
44e4360f
MD
1538 } else {
1539 static DEFINE_SPINLOCK(bootid_spinlock);
1540
1541 spin_lock(&bootid_spinlock);
1542 if (!uuid[8])
1543 generate_random_uuid(uuid);
1544 spin_unlock(&bootid_spinlock);
1545 }
1da177e4 1546
35900771
JP
1547 sprintf(buf, "%pU", uuid);
1548
1da177e4
LT
1549 fake_table.data = buf;
1550 fake_table.maxlen = sizeof(buf);
1551
8d65af78 1552 return proc_dostring(&fake_table, write, buffer, lenp, ppos);
1da177e4
LT
1553}

static struct ctl_table random_table[] = {
	{
		.procname	= "poolsize",
		.data		= &sysctl_poolsize,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "entropy_avail",
		.data		= &input_pool.entropy_count,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "write_wakeup_threshold",
		.data		= &random_write_wakeup_bits,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "urandom_min_reseed_secs",
		.data		= &random_min_urandom_seed,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "boot_id",
		.data		= &sysctl_bootid,
		.maxlen		= 16,
		.mode		= 0444,
		.proc_handler	= proc_do_uuid,
	},
	{
		.procname	= "uuid",
		.maxlen		= 16,
		.mode		= 0444,
		.proc_handler	= proc_do_uuid,
	},
#ifdef ADD_INTERRUPT_BENCH
	{
		.procname	= "add_interrupt_avg_cycles",
		.data		= &avg_cycles,
		.maxlen		= sizeof(avg_cycles),
		.mode		= 0444,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "add_interrupt_avg_deviation",
		.data		= &avg_deviation,
		.maxlen		= sizeof(avg_deviation),
		.mode		= 0444,
		.proc_handler	= proc_doulongvec_minmax,
	},
#endif
	{ }
};

/*
 * rand_initialize() is called before sysctl_init(),
 * so we cannot call register_sysctl_init() in rand_initialize()
 */
static int __init random_sysctls_init(void)
{
	register_sysctl_init("kernel/random", random_table);
	return 0;
}
device_initcall(random_sysctls_init);
#endif /* CONFIG_SYSCTL */

static atomic_t batch_generation = ATOMIC_INIT(0);

struct batched_entropy {
	union {
		/*
		 * We make this 1.5x a ChaCha block, so that we get the
		 * remaining 32 bytes from fast key erasure, plus one full
		 * block from the detached ChaCha state. We can increase
		 * the size of this later if needed so long as we keep the
		 * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE.
		 */
		u64 entropy_u64[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u64))];
		u32 entropy_u32[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u32))];
	};
	local_lock_t lock;
	unsigned int position;
	int generation;
};

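/*
 * Worked arithmetic for the sizing comment above: with CHACHA_BLOCK_SIZE
 * of 64 bytes, 1.5 blocks is 96 bytes, i.e. twelve u64 words or
 * twenty-four u32 words per batch. The checks below are an illustrative
 * sketch (not part of the original file) of how the
 * (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE invariant could be verified
 * at compile time.
 */
static inline void batched_entropy_size_check(void)
{
	BUILD_BUG_ON(sizeof(((struct batched_entropy *)0)->entropy_u64) !=
		     CHACHA_BLOCK_SIZE * 3 / 2);
	BUILD_BUG_ON(sizeof(((struct batched_entropy *)0)->entropy_u32) !=
		     CHACHA_BLOCK_SIZE * 3 / 2);
}
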
/*
 * Get a random word for internal kernel use only. The quality of the
 * random number is as good as /dev/urandom. In order to ensure that the
 * randomness provided by this function is okay, the function
 * wait_for_random_bytes() should be called and return 0 at least once
 * at any point prior.
 */
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
	.lock = INIT_LOCAL_LOCK(batched_entropy_u64.lock),
	.position = UINT_MAX
};

u64 get_random_u64(void)
{
	u64 ret;
	unsigned long flags;
	struct batched_entropy *batch;
	static void *previous;
	int next_gen;

	warn_unseeded_randomness(&previous);

	local_lock_irqsave(&batched_entropy_u64.lock, flags);
	batch = raw_cpu_ptr(&batched_entropy_u64);

	next_gen = atomic_read(&batch_generation);
	if (batch->position >= ARRAY_SIZE(batch->entropy_u64) ||
	    next_gen != batch->generation) {
		_get_random_bytes(batch->entropy_u64, sizeof(batch->entropy_u64));
		batch->position = 0;
		batch->generation = next_gen;
	}

	ret = batch->entropy_u64[batch->position];
	batch->entropy_u64[batch->position] = 0;
	++batch->position;
	local_unlock_irqrestore(&batched_entropy_u64.lock, flags);
	return ret;
}
EXPORT_SYMBOL(get_random_u64);
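
/*
 * Example (illustrative sketch -- not part of this file): typical use of
 * the batched API from elsewhere in the kernel. A hypothetical init path
 * that must not run before the crng is seeded could look like this:
 */
static int __init example_random_consumer_init(void)
{
	u64 cookie;
	int ret;

	ret = wait_for_random_bytes(); /* block until the crng is seeded */
	if (ret)
		return ret;

	cookie = get_random_u64(); /* cheap: usually one per-cpu array read */
	pr_info("example: cookie %016llx\n", (unsigned long long)cookie);
	return 0;
}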

static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
	.lock = INIT_LOCAL_LOCK(batched_entropy_u32.lock),
	.position = UINT_MAX
};

u32 get_random_u32(void)
{
	u32 ret;
	unsigned long flags;
	struct batched_entropy *batch;
	static void *previous;
	int next_gen;

	warn_unseeded_randomness(&previous);

	local_lock_irqsave(&batched_entropy_u32.lock, flags);
	batch = raw_cpu_ptr(&batched_entropy_u32);

	next_gen = atomic_read(&batch_generation);
	if (batch->position >= ARRAY_SIZE(batch->entropy_u32) ||
	    next_gen != batch->generation) {
		_get_random_bytes(batch->entropy_u32, sizeof(batch->entropy_u32));
		batch->position = 0;
		batch->generation = next_gen;
	}

	ret = batch->entropy_u32[batch->position];
	batch->entropy_u32[batch->position] = 0;
	++batch->position;
	local_unlock_irqrestore(&batched_entropy_u32.lock, flags);
	return ret;
}
EXPORT_SYMBOL(get_random_u32);

/*
 * It's important to invalidate all potential batched entropy that might
 * be stored before the crng is initialized, which we can do lazily by
 * bumping the generation counter.
 */
static void invalidate_batched_entropy(void)
{
	atomic_inc(&batch_generation);
}
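
/*
 * Illustrative userspace model (not kernel code) of the lazy scheme
 * above: consumers snapshot the shared generation and refill when it
 * has changed, so a single atomic increment invalidates every cached
 * batch without touching them individually. refill() is hypothetical.
 */
#include <stdatomic.h>
#include <stddef.h>

extern void refill(unsigned int *words, size_t bytes); /* hypothetical */

static atomic_int model_generation; /* bumped to invalidate all caches */

struct model_cache {
	int gen;
	unsigned int pos;
	unsigned int words[12];
};

static unsigned int model_cache_next(struct model_cache *c)
{
	int g = atomic_load(&model_generation);

	if (c->pos >= 12 || c->gen != g) { /* empty or stale: refill */
		refill(c->words, sizeof(c->words));
		c->pos = 0;
		c->gen = g;
	}
	return c->words[c->pos++];
}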

/**
 * randomize_page - Generate a random, page aligned address
 * @start:	The smallest acceptable address the caller will take.
 * @range:	The size of the area, starting at @start, within which the
 *		random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 * @start was already page aligned. We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range). On error,
 * @start is returned.
 */
unsigned long randomize_page(unsigned long start, unsigned long range)
{
	if (!PAGE_ALIGNED(start)) {
		range -= PAGE_ALIGN(start) - start;
		start = PAGE_ALIGN(start);
	}

	if (start > ULONG_MAX - range)
		range = ULONG_MAX - start;

	range >>= PAGE_SHIFT;

	if (range == 0)
		return start;

	return start + (get_random_long() % range << PAGE_SHIFT);
}
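
/*
 * Worked example (illustrative): with 4 KiB pages, a call such as
 * randomize_page(0x7f0000001000, 0x10000) leaves @start untouched (it is
 * already page aligned), reduces @range to 16 candidate pages, and
 * returns 0x7f0000001000 + n * 0x1000 for a uniform n in {0..15}. Note
 * that % binds tighter than <<, so the page index is chosen first and
 * then shifted into place.
 */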

/*
 * Interface for in-kernel drivers of true hardware RNGs.
 * Those devices may produce endless random bits and will be throttled
 * when our pool is full.
 */
void add_hwgenerator_randomness(const char *buffer, size_t count,
				size_t entropy)
{
	if (unlikely(crng_init == 0)) {
		size_t ret = crng_fast_load(buffer, count);

		mix_pool_bytes(buffer, ret);
		count -= ret;
		buffer += ret;
		if (!count || crng_init == 0)
			return;
	}

	/*
	 * Throttle writing if we're above the trickle threshold.
	 * We'll be woken up again once below POOL_MIN_BITS, when
	 * the calling thread is about to terminate, or once
	 * CRNG_RESEED_INTERVAL has elapsed.
	 */
	wait_event_interruptible_timeout(random_write_wait,
			!system_wq || kthread_should_stop() ||
			input_pool.entropy_count < POOL_MIN_BITS,
			CRNG_RESEED_INTERVAL);
	mix_pool_bytes(buffer, count);
	credit_entropy_bits(entropy);
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
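
/*
 * Example (illustrative sketch -- not part of this file): the hw_random
 * core's filling kthread is the typical caller here. A simplified model
 * of such a loop, with example_read_device() standing in for a real
 * device read, might look like this. A perfect source credits 8 bits
 * per byte; real drivers scale the credit by the device's quality.
 */
static size_t example_read_device(char *buf, size_t len); /* hypothetical */

static int example_hwrng_fill(void *unused)
{
	char buf[32];
	size_t bytes_read;

	while (!kthread_should_stop()) {
		bytes_read = example_read_device(buf, sizeof(buf));
		/* sleeps inside while the input pool is above the threshold */
		add_hwgenerator_randomness(buf, bytes_read, bytes_read * 8);
	}
	return 0;
}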

/*
 * Handle a random seed passed in by the bootloader. If the seed is
 * trustworthy, it is credited like input from a hardware RNG; otherwise
 * it is mixed in as non-credited device data. The decision is controlled
 * by CONFIG_RANDOM_TRUST_BOOTLOADER.
 */
void add_bootloader_randomness(const void *buf, unsigned int size)
{
	if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER))
		add_hwgenerator_randomness(buf, size, size * 8);
	else
		add_device_randomness(buf, size);
}
EXPORT_SYMBOL_GPL(add_bootloader_randomness);