random: zero buffer after reading entropy from userspace
drivers/char/random.c

/*
 * random.c -- A strong random number generator
 *
 * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
 *
 * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
 *
 * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All
 * rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, and the entire permission notice in its entirety,
 *    including the disclaimer of warranties.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior
 *    written permission.
 *
 * ALTERNATIVELY, this product may be distributed under the terms of
 * the GNU General Public License, in which case the provisions of the GPL are
 * required INSTEAD OF the above restrictions.  (This clause is
 * necessary due to a potential bad interaction between the GPL and
 * the restrictions contained in a BSD-style copyright.)
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
 * WHICH ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

/*
 * Exported interfaces ---- output
 * ===============================
 *
 * There are four exported interfaces; two for use within the kernel,
 * and two for use from userspace.
 *
 * Exported interfaces ---- userspace output
 * -----------------------------------------
 *
 * The userspace interfaces are two character devices /dev/random and
 * /dev/urandom.  /dev/random is suitable for use when very high
 * quality randomness is desired (for example, for key generation or
 * one-time pads), as it will only return a maximum of the number of
 * bits of randomness (as estimated by the random number generator)
 * contained in the entropy pool.
 *
 * The /dev/urandom device does not have this limit, and will return
 * as many bytes as are requested.  As more and more random bytes are
 * requested without giving time for the entropy pool to recharge,
 * this will result in random numbers that are merely cryptographically
 * strong.  For many applications, however, this is acceptable.
 *
 * Exported interfaces ---- kernel output
 * --------------------------------------
 *
 * The primary kernel interfaces are:
 *
 *	void get_random_bytes(void *buf, size_t nbytes);
 *	u32 get_random_u32()
 *	u64 get_random_u64()
 *	unsigned int get_random_int()
 *	unsigned long get_random_long()
 *
 * These interfaces will return the requested number of random bytes
 * into the given buffer or as a return value. This is equivalent to a
 * read from /dev/urandom. The get_random_{u32,u64,int,long}() family
 * of functions may be higher performance for one-off random integers,
 * because they do a bit of buffering.
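 *
 * A minimal usage sketch (a hypothetical caller, not code from this
 * file): a driver needing a 128-bit secret and a one-off random word
 * might do:
 *
 *	u8 key[16];
 *	u32 cookie;
 *
 *	get_random_bytes(key, sizeof(key));
 *	cookie = get_random_u32();
 *
 * Callers that need the output to be cryptographically secure should
 * call wait_for_random_bytes() (see below) first and proceed only if
 * it returns 0.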
 *
 * prandom_u32()
 * -------------
 *
 * For even weaker applications, see the pseudorandom generator
 * prandom_u32(), prandom_max(), and prandom_bytes().  If the random
 * numbers aren't security-critical at all, these are *far* cheaper.
 * Useful for self-tests, random error simulation, randomized backoffs,
 * and any other application where you trust that nobody is trying to
 * maliciously mess with you by guessing the "random" numbers.
 *
 * Exported interfaces ---- input
 * ==============================
 *
 * The current exported interfaces for gathering environmental noise
 * from the devices are:
 *
 *	void add_device_randomness(const void *buf, size_t size);
 *	void add_input_randomness(unsigned int type, unsigned int code,
 *				  unsigned int value);
 *	void add_interrupt_randomness(int irq);
 *	void add_disk_randomness(struct gendisk *disk);
 *	void add_hwgenerator_randomness(const void *buffer, size_t count,
 *					size_t entropy);
 *	void add_bootloader_randomness(const void *buf, size_t size);
 *
 * add_device_randomness() is for adding data to the random pool that
 * is likely to differ between two devices (or possibly even per boot).
 * This would be things like MAC addresses or serial numbers, or the
 * read-out of the RTC.  This does *not* add any actual entropy to the
 * pool, but it initializes the pool to different values for devices
 * that might otherwise be identical and have very little entropy
 * available to them (particularly common in the embedded world).
 *
 * add_input_randomness() uses the input layer interrupt timing, as well
 * as the event type information from the hardware.
 *
 * add_interrupt_randomness() uses the interrupt timing as random
 * inputs to the entropy pool. Using the cycle counters and the irq source
 * as inputs, it feeds the randomness roughly once a second.
 *
 * add_disk_randomness() uses what amounts to the seek time of block
 * layer request events, on a per-disk_devt basis, as input to the
 * entropy pool. Note that high-speed solid state drives with very low
 * seek times do not make for good sources of entropy, as their seek
 * times are usually fairly consistent.
 *
 * All of these routines try to estimate how many bits of randomness a
 * particular randomness source contains. They do this by keeping track
 * of the first and second order deltas of the event timings.
 *
 * add_hwgenerator_randomness() is for true hardware RNGs, and will credit
 * entropy as specified by the caller. If the entropy pool is full it will
 * block until more entropy is needed.
 *
 * add_bootloader_randomness() is the same as add_hwgenerator_randomness() or
 * add_device_randomness(), depending on whether or not the configuration
 * option CONFIG_RANDOM_TRUST_BOOTLOADER is set.
 *
 * Ensuring unpredictability at system startup
 * ============================================
 *
 * When any operating system starts up, it will go through a sequence
 * of actions that are fairly predictable by an adversary, especially
 * if the start-up does not involve interaction with a human operator.
 * This reduces the actual number of bits of unpredictability in the
 * entropy pool below the value in entropy_count.  In order to
 * counteract this effect, it helps to carry information in the
 * entropy pool across shut-downs and start-ups.  To do this, put the
 * following lines in an appropriate script which is run during the boot
 * sequence:
 *
 *	echo "Initializing random number generator..."
 *	random_seed=/var/run/random-seed
 *	# Carry a random seed from start-up to start-up
 *	# Load and then save the whole entropy pool
 *	if [ -f $random_seed ]; then
 *		cat $random_seed >/dev/urandom
 *	else
 *		touch $random_seed
 *	fi
 *	chmod 600 $random_seed
 *	dd if=/dev/urandom of=$random_seed count=1 bs=512
 *
 * and the following lines in an appropriate script which is run as
 * the system is shutdown:
 *
 *	# Carry a random seed from shut-down to start-up
 *	# Save the whole entropy pool
 *	echo "Saving random seed..."
 *	random_seed=/var/run/random-seed
 *	touch $random_seed
 *	chmod 600 $random_seed
 *	dd if=/dev/urandom of=$random_seed count=1 bs=512
 *
 * For example, on most modern systems using the System V init
 * scripts, such code fragments would be found in
 * /etc/rc.d/init.d/random.  On older Linux systems, the correct script
 * location might be in /etc/rc.d/rc.local or /etc/rc.d/rc.0.
 *
 * Effectively, these commands cause the contents of the entropy pool
 * to be saved at shut-down time and reloaded into the entropy pool at
 * start-up.  (The 'dd' in the addition to the bootup script is to
 * make sure that /etc/random-seed is different for every start-up,
 * even if the system crashes without executing rc.0.)  Even with
 * complete knowledge of the start-up activities, predicting the state
 * of the entropy pool requires knowledge of the previous history of
 * the system.
 *
 * Configuring the /dev/random driver under Linux
 * ==============================================
 *
 * The /dev/random driver under Linux uses minor numbers 8 and 9 of
 * the /dev/mem major number (#1).  So if your system does not have
 * /dev/random and /dev/urandom created already, they can be created
 * by using the commands:
 *
 *	mknod /dev/random c 1 8
 *	mknod /dev/urandom c 1 9
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
#include <crypto/chacha.h>
#include <crypto/blake2s.h>

#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/random.h>

/* #define ADD_INTERRUPT_BENCH */

enum {
	POOL_BITS = BLAKE2S_HASH_SIZE * 8,
	POOL_MIN_BITS = POOL_BITS /* No point in settling for less. */
};
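
/*
 * Worked out (an editorial note, not original to this file):
 * BLAKE2S_HASH_SIZE is 32 bytes, so POOL_BITS is 256 bits, and
 * POOL_MIN_BITS equals it -- the pool must be credited a full 256 bits
 * before the CRNG will reseed from it.
 */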

/*
 * Static global variables
 */
static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
static struct fasync_struct *fasync;

static DEFINE_SPINLOCK(random_ready_list_lock);
static LIST_HEAD(random_ready_list);

/*
 * crng_init = 0 --> Uninitialized
 *	       1 --> Initialized
 *	       2 --> Initialized from input_pool
 *
 * crng_init is protected by base_crng.lock, and only increases
 * its value (from 0->1->2).
 */
static int crng_init = 0;
#define crng_ready() (likely(crng_init > 1))
static int crng_init_cnt = 0;
static void process_random_ready_list(void);
static void _get_random_bytes(void *buf, size_t nbytes);

static struct ratelimit_state unseeded_warning =
	RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
static struct ratelimit_state urandom_warning =
	RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);

static int ratelimit_disable __read_mostly;

module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");

/**********************************************************************
 *
 * OS independent entropy store.  Here are the functions which handle
 * storing entropy in an entropy pool.
 *
 **********************************************************************/

static struct {
	struct blake2s_state hash;
	spinlock_t lock;
	unsigned int entropy_count;
} input_pool = {
	.hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
		    BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4,
		    BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 },
	.hash.outlen = BLAKE2S_HASH_SIZE,
	.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
};

static void extract_entropy(void *buf, size_t nbytes);

static void crng_reseed(void);

/*
 * This function adds bytes into the entropy "pool". It does not
 * update the entropy estimate. The caller should call
 * credit_entropy_bits if this is appropriate.
 */
static void _mix_pool_bytes(const void *in, size_t nbytes)
{
	blake2s_update(&input_pool.hash, in, nbytes);
}

static void mix_pool_bytes(const void *in, size_t nbytes)
{
	unsigned long flags;

	trace_mix_pool_bytes(nbytes, _RET_IP_);
	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(in, nbytes);
	spin_unlock_irqrestore(&input_pool.lock, flags);
}

struct fast_pool {
	u32 pool[4];
	unsigned long last;
	u16 reg_idx;
	u8 count;
};

/*
 * This is a fast mixing routine used by the interrupt randomness
 * collector. It's hardcoded for a 128-bit pool and assumes that any
 * locks that might be needed are taken by the caller.
 */
static void fast_mix(struct fast_pool *f)
{
	u32 a = f->pool[0],	b = f->pool[1];
	u32 c = f->pool[2],	d = f->pool[3];

	a += b;			c += d;
	b = rol32(b, 6);	d = rol32(d, 27);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 16);	d = rol32(d, 14);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 6);	d = rol32(d, 27);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 16);	d = rol32(d, 14);
	d ^= a;			b ^= c;

	f->pool[0] = a;  f->pool[1] = b;
	f->pool[2] = c;  f->pool[3] = d;
	f->count++;
}

static void process_random_ready_list(void)
{
	unsigned long flags;
	struct random_ready_callback *rdy, *tmp;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
		struct module *owner = rdy->owner;

		list_del_init(&rdy->list);
		rdy->func(rdy);
		module_put(owner);
	}
	spin_unlock_irqrestore(&random_ready_list_lock, flags);
}

static void credit_entropy_bits(size_t nbits)
{
	unsigned int entropy_count, orig, add;

	if (!nbits)
		return;

	add = min_t(size_t, nbits, POOL_BITS);

	do {
		orig = READ_ONCE(input_pool.entropy_count);
		entropy_count = min_t(unsigned int, POOL_BITS, orig + add);
	} while (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig);

	trace_credit_entropy_bits(nbits, entropy_count, _RET_IP_);

	if (crng_init < 2 && entropy_count >= POOL_MIN_BITS)
		crng_reseed();
}

/*********************************************************************
 *
 * CRNG using CHACHA20
 *
 *********************************************************************/

enum {
	CRNG_RESEED_INTERVAL = 300 * HZ,
	CRNG_INIT_CNT_THRESH = 2 * CHACHA_KEY_SIZE
};

static struct {
	u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long));
	unsigned long birth;
	unsigned long generation;
	spinlock_t lock;
} base_crng = {
	.lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
};

struct crng {
	u8 key[CHACHA_KEY_SIZE];
	unsigned long generation;
	local_lock_t lock;
};

static DEFINE_PER_CPU(struct crng, crngs) = {
	.generation = ULONG_MAX,
	.lock = INIT_LOCAL_LOCK(crngs.lock),
};

static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);

static void invalidate_batched_entropy(void);

/*
 * crng_fast_load() can be called by code in the interrupt service
 * path.  So we can't afford to dilly-dally. Returns the number of
 * bytes processed from cp.
 */
static size_t crng_fast_load(const void *cp, size_t len)
{
	unsigned long flags;
	const u8 *src = (const u8 *)cp;
	size_t ret = 0;

	if (!spin_trylock_irqsave(&base_crng.lock, flags))
		return 0;
	if (crng_init != 0) {
		spin_unlock_irqrestore(&base_crng.lock, flags);
		return 0;
	}
	while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
		base_crng.key[crng_init_cnt % sizeof(base_crng.key)] ^= *src;
		src++; crng_init_cnt++; len--; ret++;
	}
	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
		invalidate_batched_entropy();
		crng_init = 1;
	}
	spin_unlock_irqrestore(&base_crng.lock, flags);
	if (crng_init == 1)
		pr_notice("fast init done\n");
	return ret;
}

/*
 * crng_slow_load() is called by add_device_randomness, which has two
 * attributes. (1) We can't trust that the buffer passed to it is
 * guaranteed to be unpredictable (so it might not have any entropy at
 * all), and (2) it doesn't have the performance constraints of
 * crng_fast_load().
 *
 * So, we simply hash the contents in with the current key. Finally,
 * we do *not* advance crng_init_cnt, since the buffer we may get may be
 * something like a fixed DMI table (for example), which might very
 * well be unique to the machine, but is otherwise unvarying.
 */
static void crng_slow_load(const void *cp, size_t len)
{
	unsigned long flags;
	struct blake2s_state hash;

	blake2s_init(&hash, sizeof(base_crng.key));

	if (!spin_trylock_irqsave(&base_crng.lock, flags))
		return;
	if (crng_init != 0) {
		spin_unlock_irqrestore(&base_crng.lock, flags);
		return;
	}

	blake2s_update(&hash, base_crng.key, sizeof(base_crng.key));
	blake2s_update(&hash, cp, len);
	blake2s_final(&hash, base_crng.key);

	spin_unlock_irqrestore(&base_crng.lock, flags);
}

static void crng_reseed(void)
{
	unsigned long flags;
	int entropy_count;
	unsigned long next_gen;
	u8 key[CHACHA_KEY_SIZE];

	/*
	 * First we make sure we have POOL_MIN_BITS of entropy in the pool,
	 * and then we drain all of it. Only then can we extract a new key.
	 */
	do {
		entropy_count = READ_ONCE(input_pool.entropy_count);
		if (entropy_count < POOL_MIN_BITS)
			return;
	} while (cmpxchg(&input_pool.entropy_count, entropy_count, 0) != entropy_count);
	extract_entropy(key, sizeof(key));
	wake_up_interruptible(&random_write_wait);
	kill_fasync(&fasync, SIGIO, POLL_OUT);

	/*
	 * We copy the new key into the base_crng, overwriting the old one,
	 * and update the generation counter. We avoid hitting ULONG_MAX,
	 * because the per-cpu crngs are initialized to ULONG_MAX, so this
	 * forces new CPUs that come online to always initialize.
	 */
	spin_lock_irqsave(&base_crng.lock, flags);
	memcpy(base_crng.key, key, sizeof(base_crng.key));
	next_gen = base_crng.generation + 1;
	if (next_gen == ULONG_MAX)
		++next_gen;
	WRITE_ONCE(base_crng.generation, next_gen);
	WRITE_ONCE(base_crng.birth, jiffies);
	spin_unlock_irqrestore(&base_crng.lock, flags);
	memzero_explicit(key, sizeof(key));

	if (crng_init < 2) {
		invalidate_batched_entropy();
		crng_init = 2;
		process_random_ready_list();
		wake_up_interruptible(&crng_init_wait);
		kill_fasync(&fasync, SIGIO, POLL_IN);
		pr_notice("crng init done\n");
		if (unseeded_warning.missed) {
			pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n",
				  unseeded_warning.missed);
			unseeded_warning.missed = 0;
		}
		if (urandom_warning.missed) {
			pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
				  urandom_warning.missed);
			urandom_warning.missed = 0;
		}
	}
}

/*
 * The general form here is based on a "fast key erasure RNG" from
 * <https://blog.cr.yp.to/20170723-random.html>. It generates a ChaCha
 * block using the provided key, and then immediately overwrites that
 * key with half the block. It returns the resultant ChaCha state to the
 * user, along with the second half of the block containing 32 bytes of
 * random data that may be used; random_data_len may not be greater than
 * 32.
 */
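
/*
 * A note on the ChaCha state layout used below (standard ChaCha20, an
 * editorial note rather than original commentary): words 0-3 of
 * chacha_state hold the ChaCha constants, words 4-11 the 256-bit key,
 * and words 12-15 the block counter and nonce, zeroed for each fresh
 * key.
 */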
static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
				  u32 chacha_state[CHACHA_STATE_WORDS],
				  u8 *random_data, size_t random_data_len)
{
	u8 first_block[CHACHA_BLOCK_SIZE];

	BUG_ON(random_data_len > 32);

	chacha_init_consts(chacha_state);
	memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE);
	memset(&chacha_state[12], 0, sizeof(u32) * 4);
	chacha20_block(chacha_state, first_block);

	memcpy(key, first_block, CHACHA_KEY_SIZE);
	memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
	memzero_explicit(first_block, sizeof(first_block));
}

/*
 * This function returns a ChaCha state that you may use for generating
 * random data. It also returns up to 32 bytes on its own of random data
 * that may be used; random_data_len may not be greater than 32.
 */
static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
			    u8 *random_data, size_t random_data_len)
{
	unsigned long flags;
	struct crng *crng;

	BUG_ON(random_data_len > 32);

	/*
	 * For the fast path, we check whether we're ready, unlocked first, and
	 * then re-check once locked later. In the case where we're really not
	 * ready, we do fast key erasure with the base_crng directly, because
	 * this is what crng_{fast,slow}_load mutate during early init.
	 */
	if (unlikely(!crng_ready())) {
		bool ready;

		spin_lock_irqsave(&base_crng.lock, flags);
		ready = crng_ready();
		if (!ready)
			crng_fast_key_erasure(base_crng.key, chacha_state,
					      random_data, random_data_len);
		spin_unlock_irqrestore(&base_crng.lock, flags);
		if (!ready)
			return;
	}

	/*
	 * If the base_crng is more than 5 minutes old, we reseed, which
	 * in turn bumps the generation counter that we check below.
	 */
	if (unlikely(time_after(jiffies, READ_ONCE(base_crng.birth) + CRNG_RESEED_INTERVAL)))
		crng_reseed();

	local_lock_irqsave(&crngs.lock, flags);
	crng = raw_cpu_ptr(&crngs);

	/*
	 * If our per-cpu crng is older than the base_crng, then it means
	 * somebody reseeded the base_crng. In that case, we do fast key
	 * erasure on the base_crng, and use its output as the new key
	 * for our per-cpu crng. This brings us up to date with base_crng.
	 */
	if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) {
		spin_lock(&base_crng.lock);
		crng_fast_key_erasure(base_crng.key, chacha_state,
				      crng->key, sizeof(crng->key));
		crng->generation = base_crng.generation;
		spin_unlock(&base_crng.lock);
	}

	/*
	 * Finally, when we've made it this far, our per-cpu crng has an up
	 * to date key, and we can do fast key erasure with it to produce
	 * some random data and a ChaCha state for the caller. All other
	 * branches of this function are "unlikely", so most of the time we
	 * should wind up here immediately.
	 */
	crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len);
	local_unlock_irqrestore(&crngs.lock, flags);
}

static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes)
{
	bool large_request = nbytes > 256;
	ssize_t ret = 0;
	size_t len;
	u32 chacha_state[CHACHA_STATE_WORDS];
	u8 output[CHACHA_BLOCK_SIZE];

	if (!nbytes)
		return 0;

	len = min_t(size_t, 32, nbytes);
	crng_make_state(chacha_state, output, len);

	if (copy_to_user(buf, output, len))
		return -EFAULT;
	nbytes -= len;
	buf += len;
	ret += len;

	while (nbytes) {
		if (large_request && need_resched()) {
			if (signal_pending(current))
				break;
			schedule();
		}

		chacha20_block(chacha_state, output);
		if (unlikely(chacha_state[12] == 0))
			++chacha_state[13];

		len = min_t(size_t, nbytes, CHACHA_BLOCK_SIZE);
		if (copy_to_user(buf, output, len)) {
			ret = -EFAULT;
			break;
		}

		nbytes -= len;
		buf += len;
		ret += len;
	}

	memzero_explicit(chacha_state, sizeof(chacha_state));
	memzero_explicit(output, sizeof(output));
	return ret;
}

/*********************************************************************
 *
 * Entropy input management
 *
 *********************************************************************/

/* There is one of these per entropy source */
struct timer_rand_state {
	cycles_t last_time;
	long last_delta, last_delta2;
};

#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, };

/*
 * Add device- or boot-specific data to the input pool to help
 * initialize it.
 *
 * None of this adds any entropy; it is meant to avoid the problem of
 * the entropy pool having similar initial state across largely
 * identical devices.
 */
void add_device_randomness(const void *buf, size_t size)
{
	unsigned long time = random_get_entropy() ^ jiffies;
	unsigned long flags;

	if (!crng_ready() && size)
		crng_slow_load(buf, size);

	trace_add_device_randomness(size, _RET_IP_);
	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(buf, size);
	_mix_pool_bytes(&time, sizeof(time));
	spin_unlock_irqrestore(&input_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);
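
/*
 * A typical (hypothetical) caller is a driver feeding in device-unique
 * data during probe, e.g. a NIC mixing in its MAC address:
 *
 *	add_device_randomness(netdev->dev_addr, ETH_ALEN);
 */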

static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;

/*
 * This function adds entropy to the entropy "pool" by using timing
 * delays. It uses the timer_rand_state structure to make an estimate
 * of how many bits of entropy this call has added to the pool.
 *
 * The number "num" is also added to the pool - it should somehow describe
 * the type of event which just happened. This is currently 0-255 for
 * keyboard scan codes, and 256 upwards for interrupts.
 */
static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
{
	struct {
		long jiffies;
		unsigned int cycles;
		unsigned int num;
	} sample;
	long delta, delta2, delta3;

	sample.jiffies = jiffies;
	sample.cycles = random_get_entropy();
	sample.num = num;
	mix_pool_bytes(&sample, sizeof(sample));

	/*
	 * Calculate number of bits of randomness we probably added.
	 * We take into account the first, second and third-order deltas
	 * in order to make our estimate.
	 */
	delta = sample.jiffies - READ_ONCE(state->last_time);
	WRITE_ONCE(state->last_time, sample.jiffies);

	delta2 = delta - READ_ONCE(state->last_delta);
	WRITE_ONCE(state->last_delta, delta);

	delta3 = delta2 - READ_ONCE(state->last_delta2);
	WRITE_ONCE(state->last_delta2, delta2);

	if (delta < 0)
		delta = -delta;
	if (delta2 < 0)
		delta2 = -delta2;
	if (delta3 < 0)
		delta3 = -delta3;
	if (delta > delta2)
		delta = delta2;
	if (delta > delta3)
		delta = delta3;

	/*
	 * delta is now minimum absolute delta.
	 * Round down by 1 bit on general principles,
	 * and limit entropy estimate to 11 bits.
	 */
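	/*
	 * Worked example (illustrative, not from the original source):
	 * a minimum absolute delta of 100 ticks gives
	 * fls(100 >> 1) = fls(50) = 6, so such an event would be
	 * credited with 6 bits of entropy, up to the cap of 11.
	 */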
	credit_entropy_bits(min_t(unsigned int, fls(delta >> 1), 11));
}

void add_input_randomness(unsigned int type, unsigned int code,
			  unsigned int value)
{
	static unsigned char last_value;

	/* ignore autorepeat and the like */
	if (value == last_value)
		return;

	last_value = value;
	add_timer_randomness(&input_timer_state,
			     (type << 4) ^ code ^ (code >> 4) ^ value);
	trace_add_input_randomness(input_pool.entropy_count);
}
EXPORT_SYMBOL_GPL(add_input_randomness);

static DEFINE_PER_CPU(struct fast_pool, irq_randomness);

#ifdef ADD_INTERRUPT_BENCH
static unsigned long avg_cycles, avg_deviation;

#define AVG_SHIFT 8	/* Exponential average factor k=1/256 */
#define FIXED_1_2 (1 << (AVG_SHIFT - 1))

static void add_interrupt_bench(cycles_t start)
{
	long delta = random_get_entropy() - start;

	/* Use a weighted moving average */
	delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT);
	avg_cycles += delta;
	/* And average deviation */
	delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT);
	avg_deviation += delta;
}
#else
#define add_interrupt_bench(x)
#endif

static u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
{
	u32 *ptr = (u32 *)regs;
	unsigned int idx;

	if (regs == NULL)
		return 0;
	idx = READ_ONCE(f->reg_idx);
	if (idx >= sizeof(struct pt_regs) / sizeof(u32))
		idx = 0;
	ptr += idx++;
	WRITE_ONCE(f->reg_idx, idx);
	return *ptr;
}

void add_interrupt_randomness(int irq)
{
	struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
	struct pt_regs *regs = get_irq_regs();
	unsigned long now = jiffies;
	cycles_t cycles = random_get_entropy();
	u32 c_high, j_high;
	u64 ip;

	if (cycles == 0)
		cycles = get_reg(fast_pool, regs);
	c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
	j_high = (sizeof(now) > 4) ? now >> 32 : 0;
	fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
	fast_pool->pool[1] ^= now ^ c_high;
	ip = regs ? instruction_pointer(regs) : _RET_IP_;
	fast_pool->pool[2] ^= ip;
	fast_pool->pool[3] ^=
		(sizeof(ip) > 4) ? ip >> 32 : get_reg(fast_pool, regs);

	fast_mix(fast_pool);
	add_interrupt_bench(cycles);

	if (unlikely(crng_init == 0)) {
		if (fast_pool->count >= 64 &&
		    crng_fast_load(fast_pool->pool, sizeof(fast_pool->pool)) > 0) {
			fast_pool->count = 0;
			fast_pool->last = now;
			if (spin_trylock(&input_pool.lock)) {
				_mix_pool_bytes(&fast_pool->pool, sizeof(fast_pool->pool));
				spin_unlock(&input_pool.lock);
			}
		}
		return;
	}

	if ((fast_pool->count < 64) && !time_after(now, fast_pool->last + HZ))
		return;

	if (!spin_trylock(&input_pool.lock))
		return;

	fast_pool->last = now;
	_mix_pool_bytes(&fast_pool->pool, sizeof(fast_pool->pool));
	spin_unlock(&input_pool.lock);

	fast_pool->count = 0;

	/* award one bit for the contents of the fast pool */
	credit_entropy_bits(1);
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);

#ifdef CONFIG_BLOCK
void add_disk_randomness(struct gendisk *disk)
{
	if (!disk || !disk->random)
		return;
	/* first major is 1, so we get >= 0x200 here */
	add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
	trace_add_disk_randomness(disk_devt(disk), input_pool.entropy_count);
}
EXPORT_SYMBOL_GPL(add_disk_randomness);
#endif

/*********************************************************************
 *
 * Entropy extraction routines
 *
 *********************************************************************/

/*
 * This is an HKDF-like construction for using the hashed collected entropy
 * as a PRF key, that's then expanded block-by-block.
 */
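/*
 * Schematically (a summary of the inline comments in the function
 * below, not new behavior):
 *
 *	seed     = HASHPRF(last_key, entropy_input)
 *	next_key = HASHPRF(seed, RDSEED || 0)
 *	output_i = HASHPRF(seed, RDSEED || i), for i = 1, 2, ...
 *
 * where HASHPRF(key, msg) is keyed BLAKE2s and RDSEED is 32 bytes of
 * arch-provided seed material (with a cycle-counter fallback).
 */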
static void extract_entropy(void *buf, size_t nbytes)
{
	unsigned long flags;
	u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
	struct {
		unsigned long rdseed[32 / sizeof(long)];
		size_t counter;
	} block;
	size_t i;

	trace_extract_entropy(nbytes, input_pool.entropy_count);

	for (i = 0; i < ARRAY_SIZE(block.rdseed); ++i) {
		if (!arch_get_random_seed_long(&block.rdseed[i]) &&
		    !arch_get_random_long(&block.rdseed[i]))
			block.rdseed[i] = random_get_entropy();
	}

	spin_lock_irqsave(&input_pool.lock, flags);

	/* seed = HASHPRF(last_key, entropy_input) */
	blake2s_final(&input_pool.hash, seed);

	/* next_key = HASHPRF(seed, RDSEED || 0) */
	block.counter = 0;
	blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
	blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));

	spin_unlock_irqrestore(&input_pool.lock, flags);
	memzero_explicit(next_key, sizeof(next_key));

	while (nbytes) {
		i = min_t(size_t, nbytes, BLAKE2S_HASH_SIZE);
		/* output = HASHPRF(seed, RDSEED || ++counter) */
		++block.counter;
		blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
		nbytes -= i;
		buf += i;
	}

	memzero_explicit(seed, sizeof(seed));
	memzero_explicit(&block, sizeof(block));
}

#define warn_unseeded_randomness(previous) \
	_warn_unseeded_randomness(__func__, (void *)_RET_IP_, (previous))

static void _warn_unseeded_randomness(const char *func_name, void *caller, void **previous)
{
#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
	const bool print_once = false;
#else
	static bool print_once __read_mostly;
#endif

	if (print_once || crng_ready() ||
	    (previous && (caller == READ_ONCE(*previous))))
		return;
	WRITE_ONCE(*previous, caller);
#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
	print_once = true;
#endif
	if (__ratelimit(&unseeded_warning))
		printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n",
				func_name, caller, crng_init);
}

/*
 * This function is the exported kernel interface. It returns some
 * number of good random numbers, suitable for key generation, seeding
 * TCP sequence numbers, etc. It does not rely on the hardware random
 * number generator. For random bytes direct from the hardware RNG
 * (when available), use get_random_bytes_arch(). In order to ensure
 * that the randomness provided by this function is okay, the function
 * wait_for_random_bytes() should be called and return 0 at least once
 * at any point prior.
 */
static void _get_random_bytes(void *buf, size_t nbytes)
{
	u32 chacha_state[CHACHA_STATE_WORDS];
	u8 tmp[CHACHA_BLOCK_SIZE];
	size_t len;

	trace_get_random_bytes(nbytes, _RET_IP_);

	if (!nbytes)
		return;

	len = min_t(size_t, 32, nbytes);
	crng_make_state(chacha_state, buf, len);
	nbytes -= len;
	buf += len;

	while (nbytes) {
		if (nbytes < CHACHA_BLOCK_SIZE) {
			chacha20_block(chacha_state, tmp);
			memcpy(buf, tmp, nbytes);
			memzero_explicit(tmp, sizeof(tmp));
			break;
		}

		chacha20_block(chacha_state, buf);
		if (unlikely(chacha_state[12] == 0))
			++chacha_state[13];
		nbytes -= CHACHA_BLOCK_SIZE;
		buf += CHACHA_BLOCK_SIZE;
	}

	memzero_explicit(chacha_state, sizeof(chacha_state));
}

void get_random_bytes(void *buf, size_t nbytes)
{
	static void *previous;

	warn_unseeded_randomness(&previous);
	_get_random_bytes(buf, nbytes);
}
EXPORT_SYMBOL(get_random_bytes);

/*
 * Each time the timer fires, we expect that we got an unpredictable
 * jump in the cycle counter. Even if the timer is running on another
 * CPU, the timer activity will be touching the stack of the CPU that is
 * generating entropy.
 *
 * Note that we don't re-arm the timer in the timer itself - we are
 * happy to be scheduled away, since that just makes the load more
 * complex, but we do not want the timer to keep ticking unless the
 * entropy loop is running.
 *
 * So the re-arming always happens in the entropy loop itself.
 */
static void entropy_timer(struct timer_list *t)
{
	credit_entropy_bits(1);
}

/*
 * If we have an actual cycle counter, see if we can
 * generate enough entropy with timing noise.
 */
static void try_to_generate_entropy(void)
{
	struct {
		unsigned long now;
		struct timer_list timer;
	} stack;

	stack.now = random_get_entropy();

	/* Slow counter - or none. Don't even bother */
	if (stack.now == random_get_entropy())
		return;

	timer_setup_on_stack(&stack.timer, entropy_timer, 0);
	while (!crng_ready()) {
		if (!timer_pending(&stack.timer))
			mod_timer(&stack.timer, jiffies + 1);
		mix_pool_bytes(&stack.now, sizeof(stack.now));
		schedule();
		stack.now = random_get_entropy();
	}

	del_timer_sync(&stack.timer);
	destroy_timer_on_stack(&stack.timer);
	mix_pool_bytes(&stack.now, sizeof(stack.now));
}

/*
 * Wait for the urandom pool to be seeded and thus guaranteed to supply
 * cryptographically secure random numbers. This applies to: the /dev/urandom
 * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
 * family of functions. Using any of these functions without first calling
 * this function forfeits the guarantee of security.
 *
 * Returns: 0 if the urandom pool has been seeded.
 *          -ERESTARTSYS if the function was interrupted by a signal.
 */
int wait_for_random_bytes(void)
{
	if (likely(crng_ready()))
		return 0;

	do {
		int ret;

		ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
		if (ret)
			return ret > 0 ? 0 : ret;

		try_to_generate_entropy();
	} while (!crng_ready());

	return 0;
}
EXPORT_SYMBOL(wait_for_random_bytes);
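
/*
 * A minimal usage sketch (a hypothetical caller, not code from this
 * file), showing the wait-then-read pattern described above:
 *
 *	static int get_session_key(u8 key[32])
 *	{
 *		int ret = wait_for_random_bytes();
 *
 *		if (ret)
 *			return ret;
 *		get_random_bytes(key, 32);
 *		return 0;
 *	}
 */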

/*
 * Returns whether or not the urandom pool has been seeded and thus guaranteed
 * to supply cryptographically secure random numbers. This applies to: the
 * /dev/urandom device, the get_random_bytes function, and the
 * get_random_{u32,u64,int,long} family of functions.
 *
 * Returns: true if the urandom pool has been seeded.
 *          false if the urandom pool has not been seeded.
 */
bool rng_is_initialized(void)
{
	return crng_ready();
}
EXPORT_SYMBOL(rng_is_initialized);

/*
 * Add a callback function that will be invoked when the nonblocking
 * pool is initialised.
 *
 * returns: 0 if callback is successfully added
 *	    -EALREADY if pool is already initialised (callback not called)
 *	    -ENOENT if module for callback is not alive
 */
int add_random_ready_callback(struct random_ready_callback *rdy)
{
	struct module *owner;
	unsigned long flags;
	int err = -EALREADY;

	if (crng_ready())
		return err;

	owner = rdy->owner;
	if (!try_module_get(owner))
		return -ENOENT;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	if (crng_ready())
		goto out;

	owner = NULL;

	list_add(&rdy->list, &random_ready_list);
	err = 0;

out:
	spin_unlock_irqrestore(&random_ready_list_lock, flags);

	module_put(owner);

	return err;
}
EXPORT_SYMBOL(add_random_ready_callback);
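
/*
 * A minimal usage sketch (hypothetical module code, not part of this
 * file):
 *
 *	static void my_seed_ready(struct random_ready_callback *rdy)
 *	{
 *		... pull random bytes now that the CRNG is initialized ...
 *	}
 *
 *	static struct random_ready_callback my_rdy = {
 *		.owner = THIS_MODULE,
 *		.func = my_seed_ready,
 *	};
 *
 *	err = add_random_ready_callback(&my_rdy);
 *	if (err == -EALREADY)
 *		my_seed_ready(&my_rdy);
 */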

/*
 * Delete a previously registered readiness callback function.
 */
void del_random_ready_callback(struct random_ready_callback *rdy)
{
	unsigned long flags;
	struct module *owner = NULL;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	if (!list_empty(&rdy->list)) {
		list_del_init(&rdy->list);
		owner = rdy->owner;
	}
	spin_unlock_irqrestore(&random_ready_list_lock, flags);

	module_put(owner);
}
EXPORT_SYMBOL(del_random_ready_callback);

/*
 * This function will use the architecture-specific hardware random
 * number generator if it is available. It is not recommended for
 * use. Use get_random_bytes() instead. It returns the number of
 * bytes filled in.
 */
size_t __must_check get_random_bytes_arch(void *buf, size_t nbytes)
{
	size_t left = nbytes;
	u8 *p = buf;

	trace_get_random_bytes_arch(left, _RET_IP_);
	while (left) {
		unsigned long v;
		size_t chunk = min_t(size_t, left, sizeof(unsigned long));

		if (!arch_get_random_long(&v))
			break;

		memcpy(p, &v, chunk);
		p += chunk;
		left -= chunk;
	}

	return nbytes - left;
}
EXPORT_SYMBOL(get_random_bytes_arch);

static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
static int __init parse_trust_cpu(char *arg)
{
	return kstrtobool(arg, &trust_cpu);
}
early_param("random.trust_cpu", parse_trust_cpu);

/*
 * Note that setup_arch() may call add_device_randomness()
 * long before we get here. This allows seeding of the pools
 * with some platform dependent data very early in the boot
 * process. But it limits our options here. We must use
 * statically allocated structures that already have all
 * initializations complete at compile time. We should also
 * take care not to overwrite the precious per platform data
 * we were given.
 */
int __init rand_initialize(void)
{
	size_t i;
	ktime_t now = ktime_get_real();
	bool arch_init = true;
	unsigned long rv;

	for (i = 0; i < BLAKE2S_BLOCK_SIZE; i += sizeof(rv)) {
		if (!arch_get_random_seed_long_early(&rv) &&
		    !arch_get_random_long_early(&rv)) {
			rv = random_get_entropy();
			arch_init = false;
		}
		mix_pool_bytes(&rv, sizeof(rv));
	}
	mix_pool_bytes(&now, sizeof(now));
	mix_pool_bytes(utsname(), sizeof(*(utsname())));

	extract_entropy(base_crng.key, sizeof(base_crng.key));
	if (arch_init && trust_cpu && crng_init < 2) {
		invalidate_batched_entropy();
		crng_init = 2;
		pr_notice("crng init done (trusting CPU's manufacturer)\n");
	}

	if (ratelimit_disable) {
		urandom_warning.interval = 0;
		unseeded_warning.interval = 0;
	}
	return 0;
}

#ifdef CONFIG_BLOCK
void rand_initialize_disk(struct gendisk *disk)
{
	struct timer_rand_state *state;

	/*
	 * If kzalloc returns null, we just won't use that entropy
	 * source.
	 */
	state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
	if (state) {
		state->last_time = INITIAL_JIFFIES;
		disk->random = state;
	}
}
#endif

static ssize_t urandom_read_nowarn(struct file *file, char __user *buf,
				   size_t nbytes, loff_t *ppos)
{
	ssize_t ret;

	ret = get_random_bytes_user(buf, nbytes);
	trace_urandom_read(nbytes, input_pool.entropy_count);
	return ret;
}

static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes,
			    loff_t *ppos)
{
	static int maxwarn = 10;

	if (!crng_ready() && maxwarn > 0) {
		maxwarn--;
		if (__ratelimit(&urandom_warning))
			pr_notice("%s: uninitialized urandom read (%zd bytes read)\n",
				  current->comm, nbytes);
	}

	return urandom_read_nowarn(file, buf, nbytes, ppos);
}

static ssize_t random_read(struct file *file, char __user *buf, size_t nbytes,
			   loff_t *ppos)
{
	int ret;

	ret = wait_for_random_bytes();
	if (ret != 0)
		return ret;
	return urandom_read_nowarn(file, buf, nbytes, ppos);
}

static __poll_t random_poll(struct file *file, poll_table *wait)
{
	__poll_t mask;

	poll_wait(file, &crng_init_wait, wait);
	poll_wait(file, &random_write_wait, wait);
	mask = 0;
	if (crng_ready())
		mask |= EPOLLIN | EPOLLRDNORM;
	if (input_pool.entropy_count < POOL_MIN_BITS)
		mask |= EPOLLOUT | EPOLLWRNORM;
	return mask;
}

static int write_pool(const char __user *ubuf, size_t count)
{
	size_t len;
	int ret = 0;
	u8 block[BLAKE2S_BLOCK_SIZE];

	while (count) {
		len = min(count, sizeof(block));
		if (copy_from_user(block, ubuf, len)) {
			ret = -EFAULT;
			goto out;
		}
		count -= len;
		ubuf += len;
		mix_pool_bytes(block, len);
		cond_resched();
	}

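	/*
	 * Zero the kernel-side copy of the user-supplied bytes whether we
	 * finished mixing or faulted, so entropy read from userspace does
	 * not linger on the stack -- the "zero buffer after reading
	 * entropy from userspace" behavior named in the commit subject.
	 */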
out:
	memzero_explicit(block, sizeof(block));
	return ret;
}

static ssize_t random_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	int ret;

	ret = write_pool(buffer, count);
	if (ret)
		return ret;

	return (ssize_t)count;
}

static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int size, ent_count;
	int __user *p = (int __user *)arg;
	int retval;

	switch (cmd) {
	case RNDGETENTCNT:
		/* inherently racy, no point locking */
		if (put_user(input_pool.entropy_count, p))
			return -EFAULT;
		return 0;
	case RNDADDTOENTCNT:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		credit_entropy_bits(ent_count);
		return 0;
	case RNDADDENTROPY:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p++))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		if (get_user(size, p++))
			return -EFAULT;
		retval = write_pool((const char __user *)p, size);
		if (retval < 0)
			return retval;
		credit_entropy_bits(ent_count);
		return 0;
	case RNDZAPENTCNT:
	case RNDCLEARPOOL:
		/*
		 * Clear the entropy pool counters. We no longer clear
		 * the entropy pool, as that's silly.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (xchg(&input_pool.entropy_count, 0)) {
			wake_up_interruptible(&random_write_wait);
			kill_fasync(&fasync, SIGIO, POLL_OUT);
		}
		return 0;
	case RNDRESEEDCRNG:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (crng_init < 2)
			return -ENODATA;
		crng_reseed();
		return 0;
	default:
		return -EINVAL;
	}
}

static int random_fasync(int fd, struct file *filp, int on)
{
	return fasync_helper(fd, filp, on, &fasync);
}

const struct file_operations random_fops = {
	.read = random_read,
	.write = random_write,
	.poll = random_poll,
	.unlocked_ioctl = random_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
};

const struct file_operations urandom_fops = {
	.read = urandom_read,
	.write = random_write,
	.unlocked_ioctl = random_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
};

SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int,
		flags)
{
	if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
		return -EINVAL;

	/*
	 * Requesting insecure and blocking randomness at the same time makes
	 * no sense.
	 */
	if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
		return -EINVAL;

	if (count > INT_MAX)
		count = INT_MAX;

	if (!(flags & GRND_INSECURE) && !crng_ready()) {
		int ret;

		if (flags & GRND_NONBLOCK)
			return -EAGAIN;
		ret = wait_for_random_bytes();
		if (unlikely(ret))
			return ret;
	}
	return urandom_read_nowarn(NULL, buf, count, NULL);
}
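
/*
 * Userspace usage sketch (illustrative; this calls the syscall via the
 * libc wrapper and is not kernel code):
 *
 *	unsigned char buf[32];
 *
 *	if (getrandom(buf, sizeof(buf), 0) != sizeof(buf))
 *		err(1, "getrandom");
 */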

/********************************************************************
 *
 * Sysctl interface
 *
 ********************************************************************/

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int random_min_urandom_seed = 60;
static int random_write_wakeup_bits = POOL_MIN_BITS;
static int sysctl_poolsize = POOL_BITS;
static char sysctl_bootid[16];

/*
 * This function is used to return both the bootid UUID, and random
 * UUID. The difference is in whether table->data is NULL; if it is,
 * then a new UUID is generated and returned to the user.
 *
 * If the user accesses this via the proc interface, the UUID will be
 * returned as an ASCII string in the standard UUID format; if via the
 * sysctl system call, as 16 bytes of binary data.
 */
static int proc_do_uuid(struct ctl_table *table, int write, void *buffer,
			size_t *lenp, loff_t *ppos)
{
	struct ctl_table fake_table;
	unsigned char buf[64], tmp_uuid[16], *uuid;

	uuid = table->data;
	if (!uuid) {
		uuid = tmp_uuid;
		generate_random_uuid(uuid);
	} else {
		static DEFINE_SPINLOCK(bootid_spinlock);

		spin_lock(&bootid_spinlock);
		if (!uuid[8])
			generate_random_uuid(uuid);
		spin_unlock(&bootid_spinlock);
	}

	sprintf(buf, "%pU", uuid);

	fake_table.data = buf;
	fake_table.maxlen = sizeof(buf);

	return proc_dostring(&fake_table, write, buffer, lenp, ppos);
}

5475e8f0 1534static struct ctl_table random_table[] = {
1da177e4 1535 {
1da177e4
LT
1536 .procname = "poolsize",
1537 .data = &sysctl_poolsize,
1538 .maxlen = sizeof(int),
1539 .mode = 0444,
6d456111 1540 .proc_handler = proc_dointvec,
1da177e4
LT
1541 },
1542 {
1da177e4 1543 .procname = "entropy_avail",
c5704490 1544 .data = &input_pool.entropy_count,
1da177e4
LT
1545 .maxlen = sizeof(int),
1546 .mode = 0444,
c5704490 1547 .proc_handler = proc_dointvec,
1da177e4 1548 },
1da177e4 1549 {
1da177e4 1550 .procname = "write_wakeup_threshold",
2132a96f 1551 .data = &random_write_wakeup_bits,
1da177e4
LT
1552 .maxlen = sizeof(int),
1553 .mode = 0644,
489c7fc4 1554 .proc_handler = proc_dointvec,
1da177e4 1555 },
f5c2742c
TT
1556 {
1557 .procname = "urandom_min_reseed_secs",
1558 .data = &random_min_urandom_seed,
1559 .maxlen = sizeof(int),
1560 .mode = 0644,
1561 .proc_handler = proc_dointvec,
1562 },
1da177e4 1563 {
1da177e4
LT
1564 .procname = "boot_id",
1565 .data = &sysctl_bootid,
1566 .maxlen = 16,
1567 .mode = 0444,
6d456111 1568 .proc_handler = proc_do_uuid,
1da177e4
LT
1569 },
1570 {
1da177e4
LT
1571 .procname = "uuid",
1572 .maxlen = 16,
1573 .mode = 0444,
6d456111 1574 .proc_handler = proc_do_uuid,
1da177e4 1575 },
43759d4f
TT
1576#ifdef ADD_INTERRUPT_BENCH
1577 {
1578 .procname = "add_interrupt_avg_cycles",
1579 .data = &avg_cycles,
1580 .maxlen = sizeof(avg_cycles),
1581 .mode = 0444,
1582 .proc_handler = proc_doulongvec_minmax,
1583 },
1584 {
1585 .procname = "add_interrupt_avg_deviation",
1586 .data = &avg_deviation,
1587 .maxlen = sizeof(avg_deviation),
1588 .mode = 0444,
1589 .proc_handler = proc_doulongvec_minmax,
1590 },
1591#endif
894d2491 1592 { }
1da177e4 1593};

/*
 * rand_initialize() is called before sysctl_init(),
 * so we cannot call register_sysctl_init() in rand_initialize().
 */
static int __init random_sysctls_init(void)
{
	register_sysctl_init("kernel/random", random_table);
	return 0;
}
device_initcall(random_sysctls_init);
#endif	/* CONFIG_SYSCTL */
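
/*
 * Illustrative userspace sketch (not part of the kernel build): once
 * random_sysctls_init() has run, the table above appears under
 * /proc/sys/kernel/random/. For example, polling the entropy estimate:
 *
 *	FILE *f = fopen("/proc/sys/kernel/random/entropy_avail", "r");
 *	int avail = -1;
 *
 *	if (f) {
 *		fscanf(f, "%d", &avail);  // current input_pool estimate
 *		fclose(f);
 *	}
 */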

static atomic_t batch_generation = ATOMIC_INIT(0);

struct batched_entropy {
	union {
		/*
		 * We make this 1.5x a ChaCha block, so that we get the
		 * remaining 32 bytes from fast key erasure, plus one full
		 * block from the detached ChaCha state. We can increase
		 * the size of this later if needed so long as we keep the
		 * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE.
		 */
		u64 entropy_u64[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u64))];
		u32 entropy_u32[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u32))];
	};
	local_lock_t lock;
	unsigned int position;
	int generation;
};
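
/*
 * Worked sizing sketch (illustrative only, not required for the build,
 * assuming CHACHA_BLOCK_SIZE == 64 as in <crypto/chacha.h>): 1.5 blocks
 * is 96 bytes per batch, i.e. 12 u64 slots or 24 u32 slots. The checks
 * below merely restate the formula from the comment above.
 */
static inline void batched_entropy_size_sanity(void)
{
	BUILD_BUG_ON(sizeof(((struct batched_entropy *)0)->entropy_u64) !=
		     CHACHA_BLOCK_SIZE * 3 / 2);
	BUILD_BUG_ON(sizeof(((struct batched_entropy *)0)->entropy_u32) !=
		     CHACHA_BLOCK_SIZE * 3 / 2);
}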

/*
 * Get a random word for internal kernel use only. The quality of the
 * random number is as good as /dev/urandom. In order to ensure that the
 * randomness provided by this function is okay, the function
 * wait_for_random_bytes() should be called and return 0 at least once
 * at any point prior.
 */
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
	.lock = INIT_LOCAL_LOCK(batched_entropy_u64.lock),
	.position = UINT_MAX
};

u64 get_random_u64(void)
{
	u64 ret;
	unsigned long flags;
	struct batched_entropy *batch;
	static void *previous;
	int next_gen;

	warn_unseeded_randomness(&previous);

	local_lock_irqsave(&batched_entropy_u64.lock, flags);
	batch = raw_cpu_ptr(&batched_entropy_u64);

	next_gen = atomic_read(&batch_generation);
	if (batch->position >= ARRAY_SIZE(batch->entropy_u64) ||
	    next_gen != batch->generation) {
		_get_random_bytes(batch->entropy_u64, sizeof(batch->entropy_u64));
		batch->position = 0;
		batch->generation = next_gen;
	}

	/* Zero each word as it is handed out, so no copy lingers. */
	ret = batch->entropy_u64[batch->position];
	batch->entropy_u64[batch->position] = 0;
	++batch->position;
	local_unlock_irqrestore(&batched_entropy_u64.lock, flags);
	return ret;
}
EXPORT_SYMBOL(get_random_u64);
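
/*
 * Illustrative in-kernel usage sketch (the function name is
 * hypothetical, not a real caller): batched output is as good as
 * /dev/urandom output, but code that must not run before the CRNG is
 * seeded should call wait_for_random_bytes() first.
 */
static __maybe_unused u64 example_session_cookie(void)
{
	/* May warn via warn_unseeded_randomness() if called too early. */
	return get_random_u64();
}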

static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
	.lock = INIT_LOCAL_LOCK(batched_entropy_u32.lock),
	.position = UINT_MAX
};

u32 get_random_u32(void)
{
	u32 ret;
	unsigned long flags;
	struct batched_entropy *batch;
	static void *previous;
	int next_gen;

	warn_unseeded_randomness(&previous);

	local_lock_irqsave(&batched_entropy_u32.lock, flags);
	batch = raw_cpu_ptr(&batched_entropy_u32);

	next_gen = atomic_read(&batch_generation);
	if (batch->position >= ARRAY_SIZE(batch->entropy_u32) ||
	    next_gen != batch->generation) {
		_get_random_bytes(batch->entropy_u32, sizeof(batch->entropy_u32));
		batch->position = 0;
		batch->generation = next_gen;
	}

	/* Zero each word as it is handed out, so no copy lingers. */
	ret = batch->entropy_u32[batch->position];
	batch->entropy_u32[batch->position] = 0;
	++batch->position;
	local_unlock_irqrestore(&batched_entropy_u32.lock, flags);
	return ret;
}
EXPORT_SYMBOL(get_random_u32);
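
/*
 * Illustrative sketch (hypothetical helper, not a kernel API): deriving
 * a bounded value in [0, ceil) from get_random_u32() with the
 * multiply-shift trick, mirroring the approach of prandom_u32_max().
 * Like a plain modulo, this is only almost-uniform when ceil does not
 * divide 2^32, but the bias is negligible for small ceilings.
 */
static __maybe_unused u32 example_random_below(u32 ceil)
{
	return (u32)(((u64)get_random_u32() * ceil) >> 32);
}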

/*
 * It's important to invalidate all potential batched entropy that might
 * be stored before the crng is initialized, which we can do lazily by
 * bumping the generation counter.
 */
static void invalidate_batched_entropy(void)
{
	atomic_inc(&batch_generation);
}

/**
 * randomize_page - Generate a random, page aligned address
 * @start:	The smallest acceptable address the caller will take.
 * @range:	The size of the area, starting at @start, within which the
 *		random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed that
 * @start was already page aligned. We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range). On error,
 * @start is returned.
 */
unsigned long randomize_page(unsigned long start, unsigned long range)
{
	if (!PAGE_ALIGNED(start)) {
		range -= PAGE_ALIGN(start) - start;
		start = PAGE_ALIGN(start);
	}

	if (start > ULONG_MAX - range)
		range = ULONG_MAX - start;

	range >>= PAGE_SHIFT;

	if (range == 0)
		return start;

	return start + (get_random_long() % range << PAGE_SHIFT);
}

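/*
 * Illustrative usage sketch (hypothetical values): pick a random,
 * page-aligned base within a 1 GiB window above a caller-chosen
 * minimum. Real callers (e.g. arch mmap/brk layout code) derive the
 * bounds from their address-space layout:
 *
 *	unsigned long base = randomize_page(mmap_min, SZ_1G);
 */
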
/*
 * Interface for in-kernel drivers of true hardware RNGs.
 * Those devices may produce endless random bits and will be throttled
 * when our pool is full.
 */
void add_hwgenerator_randomness(const void *buffer, size_t count,
				size_t entropy)
{
	if (unlikely(crng_init == 0)) {
		size_t ret = crng_fast_load(buffer, count);
		mix_pool_bytes(buffer, ret);
		count -= ret;
		buffer += ret;
		if (!count || crng_init == 0)
			return;
	}

	/*
	 * Throttle writing if we're above the trickle threshold.
	 * We'll be woken up again once below POOL_MIN_BITS, when
	 * the calling thread is about to terminate, or once
	 * CRNG_RESEED_INTERVAL has elapsed.
	 */
	wait_event_interruptible_timeout(random_write_wait,
			!system_wq || kthread_should_stop() ||
			input_pool.entropy_count < POOL_MIN_BITS,
			CRNG_RESEED_INTERVAL);
	mix_pool_bytes(buffer, count);
	credit_entropy_bits(entropy);
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
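
/*
 * Illustrative driver-side sketch (hypothetical device; the read helper
 * is a stand-in and only declared, not implemented): a hardware TRNG
 * driver's kthread feeding the pool. The throttling above means this
 * loop naturally sleeps while the pool is full.
 */
extern size_t example_trng_read(void *buf, size_t len);

static __maybe_unused int example_trng_kthread(void *unused)
{
	u8 buf[32];

	while (!kthread_should_stop()) {
		size_t n = example_trng_read(buf, sizeof(buf));

		/* Claim full entropy: 8 bits credited per byte read. */
		add_hwgenerator_randomness(buf, n, n * 8);
	}
	/* Don't leave a copy of the last sample on the stack. */
	memzero_explicit(buf, sizeof(buf));
	return 0;
}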

/*
 * Handle a random seed passed in by the bootloader. If the seed is
 * trustworthy, it is credited as entropy, as if it came from a hardware
 * RNG; otherwise it is merely mixed in as device data. The decision is
 * controlled by CONFIG_RANDOM_TRUST_BOOTLOADER.
 */
void add_bootloader_randomness(const void *buf, size_t size)
{
	if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER))
		add_hwgenerator_randomness(buf, size, size * 8);
	else
		add_device_randomness(buf, size);
}
EXPORT_SYMBOL_GPL(add_bootloader_randomness);
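
/*
 * Illustrative caller sketch (hypothetical firmware glue): early boot
 * code that received a seed blob from the bootloader hands it over and
 * lets CONFIG_RANDOM_TRUST_BOOTLOADER decide whether it is credited as
 * entropy or only mixed in as device data. Zeroing afterwards avoids
 * leaving a copy of the seed around:
 *
 *	static void __init example_consume_boot_seed(void *seed, size_t len)
 *	{
 *		add_bootloader_randomness(seed, len);
 *		memzero_explicit(seed, len);
 *	}
 */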