Commit | Line | Data |
---|---|---|
a07fdae3 | 1 | // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) |
1da177e4 | 2 | /* |
9f9eff85 | 3 | * Copyright (C) 2017-2022 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved. |
9e95ce27 | 4 | * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005 |
5f75d9f3 JD |
5 | * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All rights reserved. |
6 | * | |
7 | * This driver produces cryptographically secure pseudorandom data. It is divided | |
8 | * into roughly six sections, each with a section header: | |
9 | * | |
10 | * - Initialization and readiness waiting. | |
11 | * - Fast key erasure RNG, the "crng". | |
12 | * - Entropy accumulation and extraction routines. | |
13 | * - Entropy collection routines. | |
14 | * - Userspace reader/writer interfaces. | |
15 | * - Sysctl interface. | |
16 | * | |
17 | * The high level overview is that there is one input pool, into which | |
e85c0fc1 JD |
18 | * various pieces of data are hashed. Prior to initialization, some of that |
19 | * data is then "credited" as having a certain number of bits of entropy. | |
20 | * When enough bits of entropy are available, the hash is finalized and | |
21 | * handed as a key to a stream cipher that expands it indefinitely for | |
22 | * various consumers. This key is periodically refreshed as the various | |
23 | * entropy collectors, described below, add data to the input pool. | |
1da177e4 LT |
24 | */ |
25 | ||
12cd53af YL |
26 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
27 | ||
1da177e4 | 28 | #include <linux/utsname.h> |
1da177e4 LT |
29 | #include <linux/module.h> |
30 | #include <linux/kernel.h> | |
31 | #include <linux/major.h> | |
32 | #include <linux/string.h> | |
33 | #include <linux/fcntl.h> | |
34 | #include <linux/slab.h> | |
35 | #include <linux/random.h> | |
36 | #include <linux/poll.h> | |
37 | #include <linux/init.h> | |
38 | #include <linux/fs.h> | |
322cbb50 | 39 | #include <linux/blkdev.h> |
1da177e4 | 40 | #include <linux/interrupt.h> |
27ac792c | 41 | #include <linux/mm.h> |
dd0f0cf5 | 42 | #include <linux/nodemask.h> |
1da177e4 | 43 | #include <linux/spinlock.h> |
c84dbf61 | 44 | #include <linux/kthread.h> |
1da177e4 | 45 | #include <linux/percpu.h> |
775f4b29 | 46 | #include <linux/ptrace.h> |
6265e169 | 47 | #include <linux/workqueue.h> |
0244ad00 | 48 | #include <linux/irq.h> |
4e00b339 | 49 | #include <linux/ratelimit.h> |
c6e9d6f3 TT |
50 | #include <linux/syscalls.h> |
51 | #include <linux/completion.h> | |
8da4b8c4 | 52 | #include <linux/uuid.h> |
87e7d5ab | 53 | #include <linux/uaccess.h> |
b7b67d13 | 54 | #include <linux/suspend.h> |
1ca1b917 | 55 | #include <crypto/chacha.h> |
9f9eff85 | 56 | #include <crypto/blake2s.h> |
1da177e4 | 57 | #include <asm/processor.h> |
1da177e4 | 58 | #include <asm/irq.h> |
775f4b29 | 59 | #include <asm/irq_regs.h> |
1da177e4 LT |
60 | #include <asm/io.h> |
61 | ||
5f1bb112 JD |
62 | /********************************************************************* |
63 | * | |
64 | * Initialization and readiness waiting. | |
65 | * | |
66 | * Much of the RNG infrastructure is devoted to various dependencies | |
67 | * being able to wait until the RNG has collected enough entropy and | |
68 | * is ready for safe consumption. | |
69 | * | |
70 | *********************************************************************/ | |
205a525c | 71 | |
e192be9d TT |
72 | /* |
73 | * crng_init = 0 --> Uninitialized | |
74 | * 1 --> Initialized | |
75 | * 2 --> Initialized from input_pool | |
76 | * | |
5f1bb112 | 77 | * crng_init is protected by base_crng->lock, and only increases |
e192be9d TT |
78 | * its value (from 0->1->2). |
79 | */ | |
80 | static int crng_init = 0; | |
43838a23 | 81 | #define crng_ready() (likely(crng_init > 1)) |
5f1bb112 JD |
82 | /* Various types of waiters for crng_init->2 transition. */ |
83 | static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait); | |
84 | static struct fasync_struct *fasync; | |
5acd3548 JD |
85 | static DEFINE_SPINLOCK(random_ready_chain_lock); |
86 | static RAW_NOTIFIER_HEAD(random_ready_chain); | |
e192be9d | 87 | |
5f1bb112 | 88 | /* Control how we warn userspace. */ |
4e00b339 TT |
89 | static struct ratelimit_state unseeded_warning = |
90 | RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3); | |
0313bc27 LT |
91 | static struct ratelimit_state urandom_warning = |
92 | RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3); | |
4e00b339 | 93 | static int ratelimit_disable __read_mostly; |
4e00b339 TT |
94 | module_param_named(ratelimit_disable, ratelimit_disable, int, 0644); |
95 | MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression"); | |
96 | ||
5f1bb112 JD |
97 | /* |
98 | * Returns whether or not the input pool has been seeded and thus guaranteed | |
0313bc27 LT |
99 | * to supply cryptographically secure random numbers. This applies to: the |
100 | * /dev/urandom device, the get_random_bytes function, and the get_random_{u32, | |
101 | u64,int,long} family of functions. |
5f1bb112 JD |
102 | * |
103 | * Returns: true if the input pool has been seeded. | |
104 | * false if the input pool has not been seeded. | |
105 | */ | |
106 | bool rng_is_initialized(void) | |
107 | { | |
108 | return crng_ready(); | |
109 | } | |
110 | EXPORT_SYMBOL(rng_is_initialized); | |
111 | ||
112 | /* Used by wait_for_random_bytes(), and considered an entropy collector, below. */ | |
113 | static void try_to_generate_entropy(void); | |
114 | ||
115 | /* | |
116 | * Wait for the input pool to be seeded and thus guaranteed to supply | |
0313bc27 LT |
117 | * cryptographically secure random numbers. This applies to: the /dev/urandom |
118 | * device, the get_random_bytes function, and the get_random_{u32,u64,int,long} | |
119 | * family of functions. Using any of these functions without first calling | |
120 | * this function forfeits the guarantee of security. | |
5f1bb112 JD |
121 | * |
122 | * Returns: 0 if the input pool has been seeded. | |
123 | * -ERESTARTSYS if the function was interrupted by a signal. | |
124 | */ | |
125 | int wait_for_random_bytes(void) | |
126 | { | |
a96cfe2d | 127 | while (!crng_ready()) { |
5f1bb112 | 128 | int ret; |
3e504d20 JD |
129 | |
130 | try_to_generate_entropy(); | |
5f1bb112 JD |
131 | ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ); |
132 | if (ret) | |
133 | return ret > 0 ? 0 : ret; | |
a96cfe2d | 134 | } |
5f1bb112 JD |
135 | return 0; |
136 | } | |
137 | EXPORT_SYMBOL(wait_for_random_bytes); | |
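The waiting pattern documented above can be summarized in a short sketch. This is a hypothetical in-kernel consumer, not code from this file; it assumes only `<linux/random.h>` and the two interfaces described above, `wait_for_random_bytes()` and `get_random_bytes()`.

```c
#include <linux/random.h>
#include <linux/types.h>

/* Hypothetical consumer: block until the input pool is seeded, then draw
 * key material. A -ERESTARTSYS return means a signal interrupted the wait. */
static int example_generate_session_key(u8 key[32])
{
	int ret = wait_for_random_bytes();

	if (ret)
		return ret;
	get_random_bytes(key, 32);
	return 0;
}
```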
138 | ||
139 | /* | |
140 | * Add a callback function that will be invoked when the input | |
141 | * pool is initialised. | |
142 | * | |
143 | * returns: 0 if callback is successfully added | |
144 | * -EALREADY if pool is already initialised (callback not called) | |
5f1bb112 | 145 | */ |
5acd3548 | 146 | int register_random_ready_notifier(struct notifier_block *nb) |
5f1bb112 | 147 | { |
5f1bb112 | 148 | unsigned long flags; |
5acd3548 | 149 | int ret = -EALREADY; |
5f1bb112 JD |
150 | |
151 | if (crng_ready()) | |
5acd3548 | 152 | return ret; |
5f1bb112 | 153 | |
5acd3548 JD |
154 | spin_lock_irqsave(&random_ready_chain_lock, flags); |
155 | if (!crng_ready()) | |
156 | ret = raw_notifier_chain_register(&random_ready_chain, nb); | |
157 | spin_unlock_irqrestore(&random_ready_chain_lock, flags); | |
158 | return ret; | |
5f1bb112 | 159 | } |
5f1bb112 JD |
160 | |
161 | /* | |
162 | * Delete a previously registered readiness callback function. | |
163 | */ | |
5acd3548 | 164 | int unregister_random_ready_notifier(struct notifier_block *nb) |
5f1bb112 JD |
165 | { |
166 | unsigned long flags; | |
5acd3548 | 167 | int ret; |
5f1bb112 | 168 | |
5acd3548 JD |
169 | spin_lock_irqsave(&random_ready_chain_lock, flags); |
170 | ret = raw_notifier_chain_unregister(&random_ready_chain, nb); | |
171 | spin_unlock_irqrestore(&random_ready_chain_lock, flags); | |
172 | return ret; | |
5f1bb112 | 173 | } |
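A sketch of how a driver might use the notifier pair above. The callback and setup function are hypothetical; a return of -EALREADY means the pool was already seeded, so the deferred work can simply run right away.

```c
#include <linux/notifier.h>
#include <linux/random.h>

/* Hypothetical callback: runs once the input pool has been initialised. */
static int example_rng_ready(struct notifier_block *nb, unsigned long action, void *data)
{
	/* Safe to call get_random_bytes() and friends from here on. */
	return NOTIFY_DONE;
}

static struct notifier_block example_rng_nb = { .notifier_call = example_rng_ready };

static void example_setup(void)
{
	if (register_random_ready_notifier(&example_rng_nb) == -EALREADY)
		example_rng_ready(&example_rng_nb, 0, NULL);	/* already seeded */
}
```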
5f1bb112 JD |
174 | |
175 | static void process_random_ready_list(void) | |
176 | { | |
177 | unsigned long flags; | |
5f1bb112 | 178 | |
5acd3548 JD |
179 | spin_lock_irqsave(&random_ready_chain_lock, flags); |
180 | raw_notifier_call_chain(&random_ready_chain, 0, NULL); | |
181 | spin_unlock_irqrestore(&random_ready_chain_lock, flags); | |
5f1bb112 JD |
182 | } |
183 | ||
184 | #define warn_unseeded_randomness(previous) \ | |
185 | _warn_unseeded_randomness(__func__, (void *)_RET_IP_, (previous)) | |
186 | ||
187 | static void _warn_unseeded_randomness(const char *func_name, void *caller, void **previous) | |
188 | { | |
189 | #ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM | |
190 | const bool print_once = false; | |
191 | #else | |
192 | static bool print_once __read_mostly; | |
193 | #endif | |
194 | ||
195 | if (print_once || crng_ready() || | |
196 | (previous && (caller == READ_ONCE(*previous)))) | |
197 | return; | |
198 | WRITE_ONCE(*previous, caller); | |
199 | #ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM | |
200 | print_once = true; | |
201 | #endif | |
202 | if (__ratelimit(&unseeded_warning)) | |
203 | printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", | |
204 | func_name, caller, crng_init); | |
205 | } | |
206 | ||
207 | ||
3655adc7 | 208 | /********************************************************************* |
1da177e4 | 209 | * |
3655adc7 | 210 | * Fast key erasure RNG, the "crng". |
1da177e4 | 211 | * |
3655adc7 JD |
212 | * These functions expand entropy from the entropy extractor into |
213 | * long streams for external consumption using the "fast key erasure" | |
214 | * RNG described at <https://blog.cr.yp.to/20170723-random.html>. | |
e192be9d | 215 | * |
3655adc7 JD |
216 | * There are a few exported interfaces for use by other drivers: |
217 | * | |
218 | * void get_random_bytes(void *buf, size_t nbytes) | |
219 | * u32 get_random_u32() | |
220 | * u64 get_random_u64() | |
221 | * unsigned int get_random_int() | |
222 | * unsigned long get_random_long() | |
223 | * | |
224 | * These interfaces will return the requested number of random bytes | |
0313bc27 | 225 | * into the given buffer or as a return value. This is equivalent to |
dd7aa36e JD |
226 | * a read from /dev/urandom. The u32, u64, int, and long family of |
227 | * functions may be higher performance for one-off random integers, | |
228 | * because they do a bit of buffering and do not invoke reseeding | |
229 | * until the buffer is emptied. | |
e192be9d TT |
230 | * |
231 | *********************************************************************/ | |
232 | ||
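A brief sketch of the two usage styles listed above; the surrounding names are illustrative and only `<linux/random.h>` is assumed. `get_random_bytes()` fills an arbitrary buffer, while the integer getters are served from per-cpu batches and are cheaper for one-off values.

```c
#include <linux/random.h>
#include <linux/types.h>

static void example_consumers(void)
{
	u8 nonce[16];
	u32 tag;

	get_random_bytes(nonce, sizeof(nonce));	/* arbitrary-length buffer */
	tag = get_random_u32();			/* one-off integer from a per-cpu batch */
	(void)tag;
}
```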
e85c0fc1 JD |
233 | enum { |
234 | CRNG_RESEED_START_INTERVAL = HZ, | |
235 | CRNG_RESEED_INTERVAL = 60 * HZ | |
236 | }; | |
186873c5 JD |
237 | |
238 | static struct { | |
239 | u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long)); | |
240 | unsigned long birth; | |
241 | unsigned long generation; | |
242 | spinlock_t lock; | |
243 | } base_crng = { | |
244 | .lock = __SPIN_LOCK_UNLOCKED(base_crng.lock) | |
245 | }; | |
246 | ||
247 | struct crng { | |
248 | u8 key[CHACHA_KEY_SIZE]; | |
249 | unsigned long generation; | |
250 | local_lock_t lock; | |
251 | }; | |
252 | ||
253 | static DEFINE_PER_CPU(struct crng, crngs) = { | |
254 | .generation = ULONG_MAX, | |
255 | .lock = INIT_LOCAL_LOCK(crngs.lock), | |
256 | }; | |
e192be9d | 257 | |
e85c0fc1 | 258 | /* Used by crng_reseed() and crng_make_state() to extract a new seed from the input pool. */ |
5c3b747e | 259 | static void extract_entropy(void *buf, size_t nbytes); |
e192be9d | 260 | |
e85c0fc1 JD |
261 | /* This extracts a new crng key from the input pool. */ |
262 | static void crng_reseed(void) | |
e192be9d | 263 | { |
248045b8 | 264 | unsigned long flags; |
186873c5 JD |
265 | unsigned long next_gen; |
266 | u8 key[CHACHA_KEY_SIZE]; | |
7191c628 | 267 | bool finalize_init = false; |
e192be9d | 268 | |
e85c0fc1 | 269 | extract_entropy(key, sizeof(key)); |
a9412d51 | 270 | |
186873c5 JD |
271 | /* |
272 | * We copy the new key into the base_crng, overwriting the old one, | |
273 | * and update the generation counter. We avoid hitting ULONG_MAX, | |
274 | * because the per-cpu crngs are initialized to ULONG_MAX, so this | |
275 | * forces new CPUs that come online to always initialize. | |
276 | */ | |
277 | spin_lock_irqsave(&base_crng.lock, flags); | |
278 | memcpy(base_crng.key, key, sizeof(base_crng.key)); | |
279 | next_gen = base_crng.generation + 1; | |
280 | if (next_gen == ULONG_MAX) | |
281 | ++next_gen; | |
282 | WRITE_ONCE(base_crng.generation, next_gen); | |
283 | WRITE_ONCE(base_crng.birth, jiffies); | |
a96cfe2d | 284 | if (!crng_ready()) { |
a9412d51 | 285 | crng_init = 2; |
7191c628 DB |
286 | finalize_init = true; |
287 | } | |
288 | spin_unlock_irqrestore(&base_crng.lock, flags); | |
289 | memzero_explicit(key, sizeof(key)); | |
290 | if (finalize_init) { | |
a9412d51 JD |
291 | process_random_ready_list(); |
292 | wake_up_interruptible(&crng_init_wait); | |
293 | kill_fasync(&fasync, SIGIO, POLL_IN); | |
294 | pr_notice("crng init done\n"); | |
295 | if (unseeded_warning.missed) { | |
296 | pr_notice("%d get_random_xx warning(s) missed due to ratelimiting\n", | |
297 | unseeded_warning.missed); | |
298 | unseeded_warning.missed = 0; | |
299 | } | |
0313bc27 LT |
300 | if (urandom_warning.missed) { |
301 | pr_notice("%d urandom warning(s) missed due to ratelimiting\n", | |
302 | urandom_warning.missed); | |
303 | urandom_warning.missed = 0; | |
304 | } | |
a9412d51 | 305 | } |
e192be9d TT |
306 | } |
307 | ||
186873c5 | 308 | /* |
3655adc7 JD |
309 | * This generates a ChaCha block using the provided key, and then |
310 | * immediately overwrites that key with half the block. It returns |
311 | * the resultant ChaCha state to the user, along with the second | |
312 | * half of the block containing 32 bytes of random data that may | |
313 | * be used; random_data_len may not be greater than 32. | |
8717627d JD |
314 | * |
315 | * The returned ChaCha state contains within it a copy of the old | |
316 | * key value, at index 4, so the state should always be zeroed out | |
317 | * immediately after using in order to maintain forward secrecy. | |
318 | * If the state cannot be erased in a timely manner, then it is | |
319 | * safer to set the random_data parameter to &chacha_state[4] so | |
320 | * that this function overwrites it before returning. | |
186873c5 JD |
321 | */ |
322 | static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE], | |
323 | u32 chacha_state[CHACHA_STATE_WORDS], | |
324 | u8 *random_data, size_t random_data_len) | |
e192be9d | 325 | { |
186873c5 | 326 | u8 first_block[CHACHA_BLOCK_SIZE]; |
009ba856 | 327 | |
186873c5 JD |
328 | BUG_ON(random_data_len > 32); |
329 | ||
330 | chacha_init_consts(chacha_state); | |
331 | memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE); | |
332 | memset(&chacha_state[12], 0, sizeof(u32) * 4); | |
333 | chacha20_block(chacha_state, first_block); | |
334 | ||
335 | memcpy(key, first_block, CHACHA_KEY_SIZE); | |
8717627d | 336 | memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len); |
186873c5 | 337 | memzero_explicit(first_block, sizeof(first_block)); |
1e7f583a TT |
338 | } |
339 | ||
7a7ff644 | 340 | /* |
e85c0fc1 JD |
341 | * Return whether the crng seed is considered to be sufficiently old |
342 | * that a reseeding is needed. This happens if the last reseeding | |
343 | * was CRNG_RESEED_INTERVAL ago, or during early boot, at an interval | |
344 | * proportional to the uptime. | |
7a7ff644 JD |
345 | */ |
346 | static bool crng_has_old_seed(void) | |
347 | { | |
348 | static bool early_boot = true; | |
349 | unsigned long interval = CRNG_RESEED_INTERVAL; | |
350 | ||
351 | if (unlikely(READ_ONCE(early_boot))) { | |
352 | time64_t uptime = ktime_get_seconds(); | |
353 | if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2) | |
354 | WRITE_ONCE(early_boot, false); | |
355 | else | |
e85c0fc1 | 356 | interval = max_t(unsigned int, CRNG_RESEED_START_INTERVAL, |
7a7ff644 JD |
357 | (unsigned int)uptime / 2 * HZ); |
358 | } | |
359 | return time_after(jiffies, READ_ONCE(base_crng.birth) + interval); | |
360 | } | |
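A worked example of the early-boot schedule above, using hypothetical uptimes (the check itself runs in jiffies; values are shown in seconds for readability):

```c
/*
 * Early boot, interval = max(CRNG_RESEED_START_INTERVAL, uptime / 2 * HZ):
 *   uptime  10 s -> reseed if the seed is older than  5 s
 *   uptime  60 s -> reseed if the seed is older than 30 s
 *   uptime 120 s -> early_boot clears (uptime >= CRNG_RESEED_INTERVAL / HZ * 2),
 *                   and the fixed 60 s CRNG_RESEED_INTERVAL applies from then on.
 */
```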
361 | ||
c92e040d | 362 | /* |
186873c5 JD |
363 | * This function returns a ChaCha state that you may use for generating |
364 | * random data. It also returns up to 32 bytes on its own of random data | |
365 | * that may be used; random_data_len may not be greater than 32. | |
c92e040d | 366 | */ |
186873c5 JD |
367 | static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS], |
368 | u8 *random_data, size_t random_data_len) | |
c92e040d | 369 | { |
248045b8 | 370 | unsigned long flags; |
186873c5 | 371 | struct crng *crng; |
c92e040d | 372 | |
186873c5 JD |
373 | BUG_ON(random_data_len > 32); |
374 | ||
375 | /* | |
376 | * For the fast path, we check whether we're ready, unlocked first, and | |
377 | * then re-check once locked later. In the case where we're really not | |
5c3b747e JD |
378 | * ready, we do fast key erasure with the base_crng directly, extracting |
379 | * when crng_init==0. | |
186873c5 | 380 | */ |
a96cfe2d | 381 | if (!crng_ready()) { |
186873c5 JD |
382 | bool ready; |
383 | ||
384 | spin_lock_irqsave(&base_crng.lock, flags); | |
385 | ready = crng_ready(); | |
5c3b747e JD |
386 | if (!ready) { |
387 | if (crng_init == 0) | |
388 | extract_entropy(base_crng.key, sizeof(base_crng.key)); | |
186873c5 JD |
389 | crng_fast_key_erasure(base_crng.key, chacha_state, |
390 | random_data, random_data_len); | |
5c3b747e | 391 | } |
186873c5 JD |
392 | spin_unlock_irqrestore(&base_crng.lock, flags); |
393 | if (!ready) | |
394 | return; | |
c92e040d | 395 | } |
186873c5 JD |
396 | |
397 | /* | |
e85c0fc1 JD |
398 | * If the base_crng is old enough, we reseed, which in turn bumps the |
399 | * generation counter that we check below. | |
186873c5 | 400 | */ |
7a7ff644 | 401 | if (unlikely(crng_has_old_seed())) |
e85c0fc1 | 402 | crng_reseed(); |
186873c5 JD |
403 | |
404 | local_lock_irqsave(&crngs.lock, flags); | |
405 | crng = raw_cpu_ptr(&crngs); | |
406 | ||
407 | /* | |
408 | * If our per-cpu crng is older than the base_crng, then it means | |
409 | * somebody reseeded the base_crng. In that case, we do fast key | |
410 | * erasure on the base_crng, and use its output as the new key | |
411 | * for our per-cpu crng. This brings us up to date with base_crng. | |
412 | */ | |
413 | if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) { | |
414 | spin_lock(&base_crng.lock); | |
415 | crng_fast_key_erasure(base_crng.key, chacha_state, | |
416 | crng->key, sizeof(crng->key)); | |
417 | crng->generation = base_crng.generation; | |
418 | spin_unlock(&base_crng.lock); | |
419 | } | |
420 | ||
421 | /* | |
422 | * Finally, when we've made it this far, our per-cpu crng has an up | |
423 | * to date key, and we can do fast key erasure with it to produce | |
424 | * some random data and a ChaCha state for the caller. All other | |
425 | * branches of this function are "unlikely", so most of the time we | |
426 | * should wind up here immediately. | |
427 | */ | |
428 | crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len); | |
429 | local_unlock_irqrestore(&crngs.lock, flags); | |
c92e040d TT |
430 | } |
431 | ||
3655adc7 | 432 | static void _get_random_bytes(void *buf, size_t nbytes) |
e192be9d | 433 | { |
186873c5 | 434 | u32 chacha_state[CHACHA_STATE_WORDS]; |
3655adc7 JD |
435 | u8 tmp[CHACHA_BLOCK_SIZE]; |
436 | size_t len; | |
437 | ||
438 | if (!nbytes) | |
439 | return; | |
440 | ||
441 | len = min_t(size_t, 32, nbytes); | |
442 | crng_make_state(chacha_state, buf, len); | |
443 | nbytes -= len; | |
444 | buf += len; | |
445 | ||
446 | while (nbytes) { | |
447 | if (nbytes < CHACHA_BLOCK_SIZE) { | |
448 | chacha20_block(chacha_state, tmp); | |
449 | memcpy(buf, tmp, nbytes); | |
450 | memzero_explicit(tmp, sizeof(tmp)); | |
451 | break; | |
452 | } | |
453 | ||
454 | chacha20_block(chacha_state, buf); | |
455 | if (unlikely(chacha_state[12] == 0)) | |
456 | ++chacha_state[13]; | |
457 | nbytes -= CHACHA_BLOCK_SIZE; | |
458 | buf += CHACHA_BLOCK_SIZE; | |
459 | } | |
460 | ||
461 | memzero_explicit(chacha_state, sizeof(chacha_state)); | |
462 | } | |
463 | ||
464 | /* | |
465 | * This function is the exported kernel interface. It returns some | |
466 | * number of good random numbers, suitable for key generation, seeding | |
467 | * TCP sequence numbers, etc. It does not rely on the hardware random | |
468 | * number generator. For random bytes direct from the hardware RNG | |
469 | * (when available), use get_random_bytes_arch(). In order to ensure | |
470 | * that the randomness provided by this function is okay, the function | |
471 | * wait_for_random_bytes() should be called and return 0 at least once | |
472 | * at any point prior. | |
473 | */ | |
474 | void get_random_bytes(void *buf, size_t nbytes) | |
475 | { | |
476 | static void *previous; | |
477 | ||
478 | warn_unseeded_randomness(&previous); | |
479 | _get_random_bytes(buf, nbytes); | |
480 | } | |
481 | EXPORT_SYMBOL(get_random_bytes); | |
482 | ||
483 | static ssize_t get_random_bytes_user(void __user *buf, size_t nbytes) | |
484 | { | |
5209aed5 | 485 | size_t len, left, ret = 0; |
3655adc7 JD |
486 | u32 chacha_state[CHACHA_STATE_WORDS]; |
487 | u8 output[CHACHA_BLOCK_SIZE]; | |
488 | ||
489 | if (!nbytes) | |
490 | return 0; | |
491 | ||
aba120cc JD |
492 | /* |
493 | * Immediately overwrite the ChaCha key at index 4 with random | |
494 | * bytes, in case userspace causes copy_to_user() below to sleep | |
495 | * forever, so that we still retain forward secrecy in that case. | |
496 | */ | |
497 | crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE); | |
498 | /* | |
499 | * However, if we're doing a read of len <= 32, we don't need to | |
500 | * use chacha_state after, so we can simply return those bytes to | |
501 | * the user directly. | |
502 | */ | |
503 | if (nbytes <= CHACHA_KEY_SIZE) { | |
5209aed5 | 504 | ret = nbytes - copy_to_user(buf, &chacha_state[4], nbytes); |
aba120cc JD |
505 | goto out_zero_chacha; |
506 | } | |
3655adc7 | 507 | |
5209aed5 | 508 | for (;;) { |
3655adc7 JD |
509 | chacha20_block(chacha_state, output); |
510 | if (unlikely(chacha_state[12] == 0)) | |
511 | ++chacha_state[13]; | |
512 | ||
513 | len = min_t(size_t, nbytes, CHACHA_BLOCK_SIZE); | |
5209aed5 JD |
514 | left = copy_to_user(buf, output, len); |
515 | if (left) { | |
516 | ret += len - left; | |
3655adc7 JD |
517 | break; |
518 | } | |
519 | ||
3655adc7 JD |
520 | buf += len; |
521 | ret += len; | |
5209aed5 JD |
522 | nbytes -= len; |
523 | if (!nbytes) | |
524 | break; | |
e3c1c4fd JD |
525 | |
526 | BUILD_BUG_ON(PAGE_SIZE % CHACHA_BLOCK_SIZE != 0); | |
5209aed5 | 527 | if (ret % PAGE_SIZE == 0) { |
e3c1c4fd JD |
528 | if (signal_pending(current)) |
529 | break; | |
530 | cond_resched(); | |
531 | } | |
5209aed5 | 532 | } |
3655adc7 | 533 | |
3655adc7 | 534 | memzero_explicit(output, sizeof(output)); |
aba120cc JD |
535 | out_zero_chacha: |
536 | memzero_explicit(chacha_state, sizeof(chacha_state)); | |
5209aed5 | 537 | return ret ? ret : -EFAULT; |
3655adc7 JD |
538 | } |
539 | ||
540 | /* | |
541 | * Batched entropy returns random integers. The quality of the random | |
542 | * number is as good as /dev/urandom. In order to ensure that the randomness |
543 | * provided by this function is okay, the function wait_for_random_bytes() | |
544 | * should be called and return 0 at least once at any point prior. | |
545 | */ | |
546 | struct batched_entropy { | |
547 | union { | |
548 | /* | |
549 | * We make this 1.5x a ChaCha block, so that we get the | |
550 | * remaining 32 bytes from fast key erasure, plus one full | |
551 | * block from the detached ChaCha state. We can increase | |
552 | * the size of this later if needed so long as we keep the | |
553 | * formula of (integer_blocks + 0.5) * CHACHA_BLOCK_SIZE. | |
554 | */ | |
555 | u64 entropy_u64[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u64))]; | |
556 | u32 entropy_u32[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(u32))]; | |
557 | }; | |
558 | local_lock_t lock; | |
559 | unsigned long generation; | |
560 | unsigned int position; | |
561 | }; | |
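As a sizing check on the formula in the comment above (assuming CHACHA_BLOCK_SIZE is 64 bytes, as it is for ChaCha20):

```c
/*
 * 1.5 * CHACHA_BLOCK_SIZE = 96 bytes per refill, so
 *   entropy_u64 holds 96 / 8 = 12 integers and
 *   entropy_u32 holds 96 / 4 = 24 integers.
 */
```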
562 | ||
563 | ||
564 | static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = { | |
565 | .lock = INIT_LOCAL_LOCK(batched_entropy_u64.lock), | |
566 | .position = UINT_MAX | |
567 | }; | |
568 | ||
569 | u64 get_random_u64(void) | |
570 | { | |
571 | u64 ret; | |
572 | unsigned long flags; | |
573 | struct batched_entropy *batch; | |
574 | static void *previous; | |
575 | unsigned long next_gen; | |
576 | ||
577 | warn_unseeded_randomness(&previous); | |
578 | ||
cbe89e5a JD |
579 | if (!crng_ready()) { |
580 | _get_random_bytes(&ret, sizeof(ret)); | |
581 | return ret; | |
582 | } | |
583 | ||
3655adc7 JD |
584 | local_lock_irqsave(&batched_entropy_u64.lock, flags); |
585 | batch = raw_cpu_ptr(&batched_entropy_u64); | |
586 | ||
587 | next_gen = READ_ONCE(base_crng.generation); | |
588 | if (batch->position >= ARRAY_SIZE(batch->entropy_u64) || | |
589 | next_gen != batch->generation) { | |
590 | _get_random_bytes(batch->entropy_u64, sizeof(batch->entropy_u64)); | |
591 | batch->position = 0; | |
592 | batch->generation = next_gen; | |
593 | } | |
594 | ||
595 | ret = batch->entropy_u64[batch->position]; | |
596 | batch->entropy_u64[batch->position] = 0; | |
597 | ++batch->position; | |
598 | local_unlock_irqrestore(&batched_entropy_u64.lock, flags); | |
599 | return ret; | |
600 | } | |
601 | EXPORT_SYMBOL(get_random_u64); | |
602 | ||
603 | static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = { | |
604 | .lock = INIT_LOCAL_LOCK(batched_entropy_u32.lock), | |
605 | .position = UINT_MAX | |
606 | }; | |
607 | ||
608 | u32 get_random_u32(void) | |
609 | { | |
610 | u32 ret; | |
611 | unsigned long flags; | |
612 | struct batched_entropy *batch; | |
613 | static void *previous; | |
614 | unsigned long next_gen; | |
615 | ||
616 | warn_unseeded_randomness(&previous); | |
617 | ||
cbe89e5a JD |
618 | if (!crng_ready()) { |
619 | _get_random_bytes(&ret, sizeof(ret)); | |
620 | return ret; | |
621 | } | |
622 | ||
3655adc7 JD |
623 | local_lock_irqsave(&batched_entropy_u32.lock, flags); |
624 | batch = raw_cpu_ptr(&batched_entropy_u32); | |
625 | ||
626 | next_gen = READ_ONCE(base_crng.generation); | |
627 | if (batch->position >= ARRAY_SIZE(batch->entropy_u32) || | |
628 | next_gen != batch->generation) { | |
629 | _get_random_bytes(batch->entropy_u32, sizeof(batch->entropy_u32)); | |
630 | batch->position = 0; | |
631 | batch->generation = next_gen; | |
632 | } | |
633 | ||
634 | ret = batch->entropy_u32[batch->position]; | |
635 | batch->entropy_u32[batch->position] = 0; | |
636 | ++batch->position; | |
637 | local_unlock_irqrestore(&batched_entropy_u32.lock, flags); | |
638 | return ret; | |
639 | } | |
640 | EXPORT_SYMBOL(get_random_u32); | |
641 | ||
3191dd5a JD |
642 | #ifdef CONFIG_SMP |
643 | /* | |
644 | * This function is called when the CPU is coming up, with entry | |
645 | * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP. | |
646 | */ | |
647 | int random_prepare_cpu(unsigned int cpu) | |
648 | { | |
649 | /* | |
650 | * When the cpu comes back online, immediately invalidate both | |
651 | * the per-cpu crng and all batches, so that we serve fresh | |
652 | * randomness. | |
653 | */ | |
654 | per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX; | |
655 | per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX; | |
656 | per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX; | |
657 | return 0; | |
658 | } | |
659 | #endif | |
660 | ||
3655adc7 JD |
661 | /** |
662 | * randomize_page - Generate a random, page aligned address | |
663 | * @start: The smallest acceptable address the caller will take. | |
664 | * @range: The size of the area, starting at @start, within which the | |
665 | * random address must fall. | |
666 | * | |
667 | * If @start + @range would overflow, @range is capped. | |
668 | * | |
669 | * NOTE: Historical use of randomize_range, which this replaces, presumed that | |
670 | * @start was already page aligned. We now align it regardless. | |
671 | * | |
672 | * Return: A page aligned address within [start, start + range). On error, | |
673 | * @start is returned. | |
674 | */ | |
675 | unsigned long randomize_page(unsigned long start, unsigned long range) | |
676 | { | |
677 | if (!PAGE_ALIGNED(start)) { | |
678 | range -= PAGE_ALIGN(start) - start; | |
679 | start = PAGE_ALIGN(start); | |
680 | } | |
681 | ||
682 | if (start > ULONG_MAX - range) | |
683 | range = ULONG_MAX - start; | |
684 | ||
685 | range >>= PAGE_SHIFT; | |
686 | ||
687 | if (range == 0) | |
688 | return start; | |
689 | ||
690 | return start + (get_random_long() % range << PAGE_SHIFT); | |
691 | } | |
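A hypothetical caller of randomize_page(); the base address and the 32 MiB window are illustrative only.

```c
/* Pick a page-aligned address somewhere in [base, base + 32 MiB). */
static unsigned long example_pick_base(unsigned long base)
{
	return randomize_page(base, 32UL << 20);
}
```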
692 | ||
693 | /* | |
694 | * This function will use the architecture-specific hardware random | |
695 | * number generator if it is available. It is not recommended for | |
696 | * use. Use get_random_bytes() instead. It returns the number of | |
697 | * bytes filled in. | |
698 | */ | |
699 | size_t __must_check get_random_bytes_arch(void *buf, size_t nbytes) | |
700 | { | |
701 | size_t left = nbytes; | |
702 | u8 *p = buf; | |
703 | ||
704 | while (left) { | |
705 | unsigned long v; | |
706 | size_t chunk = min_t(size_t, left, sizeof(unsigned long)); | |
707 | ||
708 | if (!arch_get_random_long(&v)) | |
709 | break; | |
710 | ||
711 | memcpy(p, &v, chunk); | |
712 | p += chunk; | |
713 | left -= chunk; | |
714 | } | |
715 | ||
716 | return nbytes - left; | |
717 | } | |
718 | EXPORT_SYMBOL(get_random_bytes_arch); | |
719 | ||
a5ed7cb1 JD |
720 | |
721 | /********************************************************************** | |
722 | * | |
723 | * Entropy accumulation and extraction routines. | |
724 | * | |
725 | * Callers may add entropy via: | |
726 | * | |
727 | * static void mix_pool_bytes(const void *in, size_t nbytes) | |
728 | * | |
729 | * After which, if added entropy should be credited: | |
730 | * | |
e85c0fc1 | 731 | * static void credit_init_bits(size_t nbits) |
a5ed7cb1 | 732 | * |
e85c0fc1 | 733 | * Finally, extract entropy via: |
a5ed7cb1 JD |
734 | * |
735 | * static void extract_entropy(void *buf, size_t nbytes) | |
a5ed7cb1 JD |
736 | * |
737 | **********************************************************************/ | |
738 | ||
3655adc7 JD |
739 | enum { |
740 | POOL_BITS = BLAKE2S_HASH_SIZE * 8, | |
e85c0fc1 JD |
741 | POOL_INIT_BITS = POOL_BITS, /* No point in settling for less. */ |
742 | POOL_FAST_INIT_BITS = POOL_INIT_BITS / 2 | |
3655adc7 JD |
743 | }; |
744 | ||
3655adc7 JD |
745 | static struct { |
746 | struct blake2s_state hash; | |
747 | spinlock_t lock; | |
e85c0fc1 | 748 | unsigned int init_bits; |
3655adc7 JD |
749 | } input_pool = { |
750 | .hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE), | |
751 | BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4, | |
752 | BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 }, | |
753 | .hash.outlen = BLAKE2S_HASH_SIZE, | |
754 | .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock), | |
755 | }; | |
756 | ||
a5ed7cb1 JD |
757 | static void _mix_pool_bytes(const void *in, size_t nbytes) |
758 | { | |
759 | blake2s_update(&input_pool.hash, in, nbytes); | |
760 | } | |
3655adc7 JD |
761 | |
762 | /* | |
e85c0fc1 JD |
763 | * This function adds bytes into the input pool. It does not |
764 | * update the initialization bit counter; the caller should call | |
765 | * credit_init_bits if this is appropriate. | |
3655adc7 | 766 | */ |
a5ed7cb1 | 767 | static void mix_pool_bytes(const void *in, size_t nbytes) |
3655adc7 | 768 | { |
a5ed7cb1 JD |
769 | unsigned long flags; |
770 | ||
771 | spin_lock_irqsave(&input_pool.lock, flags); | |
772 | _mix_pool_bytes(in, nbytes); | |
773 | spin_unlock_irqrestore(&input_pool.lock, flags); | |
3655adc7 JD |
774 | } |
775 | ||
a5ed7cb1 JD |
776 | /* |
777 | * This is an HKDF-like construction for using the hashed collected entropy | |
778 | * as a PRF key, that's then expanded block-by-block. | |
779 | */ | |
780 | static void extract_entropy(void *buf, size_t nbytes) | |
3655adc7 JD |
781 | { |
782 | unsigned long flags; | |
a5ed7cb1 JD |
783 | u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE]; |
784 | struct { | |
785 | unsigned long rdseed[32 / sizeof(long)]; | |
786 | size_t counter; | |
787 | } block; | |
788 | size_t i; | |
789 | ||
790 | for (i = 0; i < ARRAY_SIZE(block.rdseed); ++i) { | |
791 | if (!arch_get_random_seed_long(&block.rdseed[i]) && | |
792 | !arch_get_random_long(&block.rdseed[i])) | |
793 | block.rdseed[i] = random_get_entropy(); | |
794 | } | |
3655adc7 JD |
795 | |
796 | spin_lock_irqsave(&input_pool.lock, flags); | |
a5ed7cb1 JD |
797 | |
798 | /* seed = HASHPRF(last_key, entropy_input) */ | |
799 | blake2s_final(&input_pool.hash, seed); | |
800 | ||
801 | /* next_key = HASHPRF(seed, RDSEED || 0) */ | |
802 | block.counter = 0; | |
803 | blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed)); | |
804 | blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key)); | |
805 | ||
3655adc7 | 806 | spin_unlock_irqrestore(&input_pool.lock, flags); |
a5ed7cb1 JD |
807 | memzero_explicit(next_key, sizeof(next_key)); |
808 | ||
809 | while (nbytes) { | |
810 | i = min_t(size_t, nbytes, BLAKE2S_HASH_SIZE); | |
811 | /* output = HASHPRF(seed, RDSEED || ++counter) */ | |
812 | ++block.counter; | |
813 | blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed)); | |
814 | nbytes -= i; | |
815 | buf += i; | |
816 | } | |
817 | ||
818 | memzero_explicit(seed, sizeof(seed)); | |
819 | memzero_explicit(&block, sizeof(block)); | |
820 | } | |
821 | ||
e85c0fc1 | 822 | static void credit_init_bits(size_t nbits) |
5c3b747e | 823 | { |
e85c0fc1 | 824 | unsigned int init_bits, orig, add; |
5c3b747e JD |
825 | unsigned long flags; |
826 | ||
e85c0fc1 | 827 | if (crng_ready() || !nbits) |
5c3b747e JD |
828 | return; |
829 | ||
830 | add = min_t(size_t, nbits, POOL_BITS); | |
831 | ||
832 | do { | |
e85c0fc1 JD |
833 | orig = READ_ONCE(input_pool.init_bits); |
834 | init_bits = min_t(unsigned int, POOL_BITS, orig + add); | |
835 | } while (cmpxchg(&input_pool.init_bits, orig, init_bits) != orig); | |
5c3b747e | 836 | |
e85c0fc1 JD |
837 | if (!crng_ready() && init_bits >= POOL_INIT_BITS) |
838 | crng_reseed(); | |
839 | else if (unlikely(crng_init == 0 && init_bits >= POOL_FAST_INIT_BITS)) { | |
5c3b747e JD |
840 | spin_lock_irqsave(&base_crng.lock, flags); |
841 | if (crng_init == 0) { | |
842 | extract_entropy(base_crng.key, sizeof(base_crng.key)); | |
843 | crng_init = 1; | |
844 | } | |
845 | spin_unlock_irqrestore(&base_crng.lock, flags); | |
846 | } | |
847 | } | |
848 | ||
92c653cf JD |
849 | |
850 | /********************************************************************** | |
851 | * | |
852 | * Entropy collection routines. | |
853 | * | |
854 | * The following exported functions are used for pushing entropy into | |
855 | * the above entropy accumulation routines: | |
856 | * | |
857 | * void add_device_randomness(const void *buf, size_t size); | |
92c653cf JD |
858 | * void add_hwgenerator_randomness(const void *buffer, size_t count, |
859 | * size_t entropy); | |
860 | * void add_bootloader_randomness(const void *buf, size_t size); | |
ae099e8e | 861 | * void add_vmfork_randomness(const void *unique_vm_id, size_t size); |
92c653cf | 862 | * void add_interrupt_randomness(int irq); |
a4b5c26b JD |
863 | * void add_input_randomness(unsigned int type, unsigned int code, |
864 | * unsigned int value); | |
865 | * void add_disk_randomness(struct gendisk *disk); | |
92c653cf JD |
866 | * |
867 | * add_device_randomness() adds data to the input pool that | |
868 | * is likely to differ between two devices (or possibly even per boot). | |
869 | * This would be things like MAC addresses or serial numbers, or the | |
870 | * read-out of the RTC. This does *not* credit any actual entropy to | |
871 | * the pool, but it initializes the pool to different values for devices | |
872 | * that might otherwise be identical and have very little entropy | |
873 | * available to them (particularly common in the embedded world). | |
874 | * | |
92c653cf JD |
875 | * add_hwgenerator_randomness() is for true hardware RNGs, and will credit |
876 | * entropy as specified by the caller. If the entropy pool is full it will | |
877 | * block until more entropy is needed. | |
878 | * | |
5c3b747e JD |
879 | * add_bootloader_randomness() is called by bootloader drivers, such as EFI |
880 | * and device tree, and credits its input depending on whether or not the | |
881 | * configuration option CONFIG_RANDOM_TRUST_BOOTLOADER is set. | |
92c653cf | 882 | * |
ae099e8e JD |
883 | * add_vmfork_randomness() adds a unique (but not necessarily secret) ID |
884 | * representing the current instance of a VM to the pool, without crediting, | |
885 | * and then force-reseeds the crng so that it takes effect immediately. | |
886 | * | |
92c653cf JD |
887 | * add_interrupt_randomness() uses the interrupt timing as random |
888 | * inputs to the entropy pool. Using the cycle counters and the irq source | |
889 | * as inputs, it feeds the input pool roughly once a second or after 64 | |
890 | * interrupts, crediting 1 bit of entropy for whichever comes first. | |
891 | * | |
a4b5c26b JD |
892 | * add_input_randomness() uses the input layer interrupt timing, as well |
893 | * as the event type information from the hardware. | |
894 | * | |
895 | * add_disk_randomness() uses what amounts to the seek time of block | |
896 | * layer request events, on a per-disk_devt basis, as input to the | |
897 | * entropy pool. Note that high-speed solid state drives with very low | |
898 | * seek times do not make for good sources of entropy, as their seek | |
899 | * times are usually fairly consistent. | |
900 | * | |
901 | * The last two routines try to estimate how many bits of entropy | |
902 | * to credit. They do this by keeping track of the first and second | |
903 | * order deltas of the event timings. | |
904 | * | |
92c653cf JD |
905 | **********************************************************************/ |
906 | ||
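A sketch of the first case in the list above: a hypothetical driver mixing device-unique but non-secret identifiers at probe time. The struct and field names are illustrative; only add_device_randomness() is an interface from this file, and it credits no entropy.

```c
#include <linux/random.h>
#include <linux/types.h>

struct example_dev {
	u8 serial[16];
	u8 mac_addr[6];
};

/* Mixes device-unique, non-secret data into the input pool; credits nothing. */
static void example_probe(struct example_dev *dev)
{
	add_device_randomness(dev->serial, sizeof(dev->serial));
	add_device_randomness(dev->mac_addr, sizeof(dev->mac_addr));
}
```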
907 | static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU); | |
d97c68d1 | 908 | static bool trust_bootloader __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER); |
92c653cf JD |
909 | static int __init parse_trust_cpu(char *arg) |
910 | { | |
911 | return kstrtobool(arg, &trust_cpu); | |
912 | } | |
d97c68d1 JD |
913 | static int __init parse_trust_bootloader(char *arg) |
914 | { | |
915 | return kstrtobool(arg, &trust_bootloader); | |
916 | } | |
92c653cf | 917 | early_param("random.trust_cpu", parse_trust_cpu); |
d97c68d1 | 918 | early_param("random.trust_bootloader", parse_trust_bootloader); |
3655adc7 | 919 | |
b7b67d13 JD |
920 | static int random_pm_notification(struct notifier_block *nb, unsigned long action, void *data) |
921 | { | |
922 | unsigned long flags, entropy = random_get_entropy(); | |
923 | ||
924 | /* | |
925 | * Encode a representation of how long the system has been suspended, | |
926 | * in a way that is distinct from prior system suspends. | |
927 | */ | |
928 | ktime_t stamps[] = { ktime_get(), ktime_get_boottime(), ktime_get_real() }; | |
929 | ||
930 | spin_lock_irqsave(&input_pool.lock, flags); | |
931 | _mix_pool_bytes(&action, sizeof(action)); | |
932 | _mix_pool_bytes(stamps, sizeof(stamps)); | |
933 | _mix_pool_bytes(&entropy, sizeof(entropy)); | |
934 | spin_unlock_irqrestore(&input_pool.lock, flags); | |
935 | ||
936 | if (crng_ready() && (action == PM_RESTORE_PREPARE || | |
937 | (action == PM_POST_SUSPEND && | |
938 | !IS_ENABLED(CONFIG_PM_AUTOSLEEP) && !IS_ENABLED(CONFIG_ANDROID)))) { | |
e85c0fc1 | 939 | crng_reseed(); |
b7b67d13 JD |
940 | pr_notice("crng reseeded on system resumption\n"); |
941 | } | |
942 | return 0; | |
943 | } | |
944 | ||
945 | static struct notifier_block pm_notifier = { .notifier_call = random_pm_notification }; | |
946 | ||
3655adc7 | 947 | /* |
92c653cf JD |
948 | * The first collection of entropy occurs at system boot while interrupts |
949 | * are still turned off. Here we push in RDSEED, a timestamp, and utsname(). | |
950 | * Depending on the above configuration knob, RDSEED may be considered | |
951 | * sufficient for initialization. Note that much earlier setup may already | |
952 | * have pushed entropy into the input pool by the time we get here. | |
3655adc7 | 953 | */ |
92c653cf | 954 | int __init rand_initialize(void) |
3655adc7 | 955 | { |
92c653cf JD |
956 | size_t i; |
957 | ktime_t now = ktime_get_real(); | |
958 | bool arch_init = true; | |
959 | unsigned long rv; | |
186873c5 | 960 | |
1754abb3 JD |
961 | #if defined(LATENT_ENTROPY_PLUGIN) |
962 | static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy; | |
963 | _mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed)); | |
964 | #endif | |
965 | ||
92c653cf JD |
966 | for (i = 0; i < BLAKE2S_BLOCK_SIZE; i += sizeof(rv)) { |
967 | if (!arch_get_random_seed_long_early(&rv) && | |
968 | !arch_get_random_long_early(&rv)) { | |
969 | rv = random_get_entropy(); | |
970 | arch_init = false; | |
971 | } | |
afba0b80 | 972 | _mix_pool_bytes(&rv, sizeof(rv)); |
92c653cf | 973 | } |
afba0b80 JD |
974 | _mix_pool_bytes(&now, sizeof(now)); |
975 | _mix_pool_bytes(utsname(), sizeof(*(utsname()))); | |
186873c5 | 976 | |
e85c0fc1 JD |
977 | if (crng_ready()) |
978 | crng_reseed(); | |
979 | else if (arch_init && trust_cpu) | |
980 | credit_init_bits(BLAKE2S_BLOCK_SIZE * 8); | |
e192be9d | 981 | |
0313bc27 LT |
982 | if (ratelimit_disable) { |
983 | urandom_warning.interval = 0; | |
92c653cf | 984 | unseeded_warning.interval = 0; |
0313bc27 | 985 | } |
4b758eda | 986 | |
b7b67d13 JD |
987 | WARN_ON(register_pm_notifier(&pm_notifier)); |
988 | ||
4b758eda JD |
989 | WARN(!random_get_entropy(), "Missing cycle counter and fallback timer; RNG " |
990 | "entropy collection will consequently suffer."); | |
92c653cf | 991 | return 0; |
3655adc7 | 992 | } |
e192be9d | 993 | |
a2080a67 | 994 | /* |
e192be9d TT |
995 | * Add device- or boot-specific data to the input pool to help |
996 | * initialize it. | |
a2080a67 | 997 | * |
e192be9d TT |
998 | * None of this adds any entropy; it is meant to avoid the problem of |
999 | * the entropy pool having similar initial state across largely | |
1000 | * identical devices. | |
a2080a67 | 1001 | */ |
04ec96b7 | 1002 | void add_device_randomness(const void *buf, size_t size) |
a2080a67 | 1003 | { |
4b758eda JD |
1004 | unsigned long entropy = random_get_entropy(); |
1005 | unsigned long flags; | |
a2080a67 | 1006 | |
3ef4cb2d | 1007 | spin_lock_irqsave(&input_pool.lock, flags); |
4b758eda | 1008 | _mix_pool_bytes(&entropy, sizeof(entropy)); |
90ed1e67 | 1009 | _mix_pool_bytes(buf, size); |
3ef4cb2d | 1010 | spin_unlock_irqrestore(&input_pool.lock, flags); |
a2080a67 LT |
1011 | } |
1012 | EXPORT_SYMBOL(add_device_randomness); | |
1013 | ||
92c653cf JD |
1014 | /* |
1015 | * Interface for in-kernel drivers of true hardware RNGs. | |
1016 | * Those devices may produce endless random bits and will be throttled | |
1017 | * when our pool is full. | |
1018 | */ | |
1019 | void add_hwgenerator_randomness(const void *buffer, size_t count, | |
1020 | size_t entropy) | |
1021 | { | |
e85c0fc1 JD |
1022 | mix_pool_bytes(buffer, count); |
1023 | credit_init_bits(entropy); | |
1024 | ||
92c653cf | 1025 | /* |
e85c0fc1 JD |
1026 | * Throttle writing to once every CRNG_RESEED_INTERVAL, unless |
1027 | * we're not yet initialized. | |
92c653cf | 1028 | */ |
e85c0fc1 JD |
1029 | if (!kthread_should_stop() && crng_ready()) |
1030 | schedule_timeout_interruptible(CRNG_RESEED_INTERVAL); | |
92c653cf JD |
1031 | } |
1032 | EXPORT_SYMBOL_GPL(add_hwgenerator_randomness); | |
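A sketch of a feeder thread in the style of the hw_random core. read_hw_bytes() is a hypothetical stand-in for a real device read, and crediting the full count * 8 bits assumes a fully trusted TRNG; real drivers scale the credit by the device's quality.

```c
#include <linux/kthread.h>
#include <linux/random.h>
#include <linux/types.h>

size_t read_hw_bytes(void *buf, size_t len);	/* hypothetical device read */

static int example_hwrng_fill(void *unused)
{
	u8 buf[32];

	while (!kthread_should_stop()) {
		size_t n = read_hw_bytes(buf, sizeof(buf));

		/* Mixes, credits n * 8 bits, and throttles once the pool is ready. */
		add_hwgenerator_randomness(buf, n, n * 8);
	}
	return 0;
}
```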
1033 | ||
1034 | /* | |
5c3b747e JD |
1035 | * Handle random seed passed by bootloader, and credit it if |
1036 | * CONFIG_RANDOM_TRUST_BOOTLOADER is set. | |
92c653cf JD |
1037 | */ |
1038 | void add_bootloader_randomness(const void *buf, size_t size) | |
1039 | { | |
5c3b747e | 1040 | mix_pool_bytes(buf, size); |
d97c68d1 | 1041 | if (trust_bootloader) |
e85c0fc1 | 1042 | credit_init_bits(size * 8); |
92c653cf JD |
1043 | } |
1044 | EXPORT_SYMBOL_GPL(add_bootloader_randomness); | |
1045 | ||
a4107d34 | 1046 | #if IS_ENABLED(CONFIG_VMGENID) |
f3c2682b JD |
1047 | static BLOCKING_NOTIFIER_HEAD(vmfork_chain); |
1048 | ||
ae099e8e JD |
1049 | /* |
1050 | * Handle a new VM ID, which is unique but not secret, so we |
1051 | * don't credit it, but we do immediately force a reseed after so | |
1052 | * that it's used by the crng posthaste. | |
1053 | */ | |
1054 | void add_vmfork_randomness(const void *unique_vm_id, size_t size) | |
1055 | { | |
1056 | add_device_randomness(unique_vm_id, size); | |
1057 | if (crng_ready()) { | |
e85c0fc1 | 1058 | crng_reseed(); |
ae099e8e JD |
1059 | pr_notice("crng reseeded due to virtual machine fork\n"); |
1060 | } | |
f3c2682b | 1061 | blocking_notifier_call_chain(&vmfork_chain, 0, NULL); |
ae099e8e | 1062 | } |
a4107d34 | 1063 | #if IS_MODULE(CONFIG_VMGENID) |
ae099e8e | 1064 | EXPORT_SYMBOL_GPL(add_vmfork_randomness); |
a4107d34 | 1065 | #endif |
f3c2682b JD |
1066 | |
1067 | int register_random_vmfork_notifier(struct notifier_block *nb) | |
1068 | { | |
1069 | return blocking_notifier_chain_register(&vmfork_chain, nb); | |
1070 | } | |
1071 | EXPORT_SYMBOL_GPL(register_random_vmfork_notifier); | |
1072 | ||
1073 | int unregister_random_vmfork_notifier(struct notifier_block *nb) | |
1074 | { | |
1075 | return blocking_notifier_chain_unregister(&vmfork_chain, nb); | |
1076 | } | |
1077 | EXPORT_SYMBOL_GPL(unregister_random_vmfork_notifier); | |
a4107d34 | 1078 | #endif |
ae099e8e | 1079 | |
92c653cf | 1080 | struct fast_pool { |
58340f8e | 1081 | struct work_struct mix; |
f5eab0e2 | 1082 | unsigned long pool[4]; |
92c653cf | 1083 | unsigned long last; |
3191dd5a | 1084 | unsigned int count; |
92c653cf JD |
1085 | }; |
1086 | ||
f5eab0e2 JD |
1087 | static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = { |
1088 | #ifdef CONFIG_64BIT | |
1089 | /* SipHash constants */ | |
1090 | .pool = { 0x736f6d6570736575UL, 0x646f72616e646f6dUL, | |
1091 | 0x6c7967656e657261UL, 0x7465646279746573UL } | |
1092 | #else | |
1093 | /* HalfSipHash constants */ | |
1094 | .pool = { 0, 0, 0x6c796765U, 0x74656462U } | |
1095 | #endif | |
1096 | }; | |
1097 | ||
92c653cf | 1098 | /* |
f5eab0e2 JD |
1099 | * This is [Half]SipHash-1-x, starting from an empty key. Because |
1100 | * the key is fixed, it assumes that its inputs are non-malicious, | |
1101 | * and therefore this has no security on its own. s represents the | |
4b758eda | 1102 | * four-word SipHash state, while v represents a two-word input. |
92c653cf | 1103 | */ |
4b758eda | 1104 | static void fast_mix(unsigned long s[4], const unsigned long v[2]) |
92c653cf | 1105 | { |
f5eab0e2 | 1106 | size_t i; |
92c653cf | 1107 | |
4b758eda | 1108 | for (i = 0; i < 2; ++i) { |
f5eab0e2 JD |
1109 | s[3] ^= v[i]; |
1110 | #ifdef CONFIG_64BIT | |
1111 | s[0] += s[1]; s[1] = rol64(s[1], 13); s[1] ^= s[0]; s[0] = rol64(s[0], 32); | |
1112 | s[2] += s[3]; s[3] = rol64(s[3], 16); s[3] ^= s[2]; | |
1113 | s[0] += s[3]; s[3] = rol64(s[3], 21); s[3] ^= s[0]; | |
1114 | s[2] += s[1]; s[1] = rol64(s[1], 17); s[1] ^= s[2]; s[2] = rol64(s[2], 32); | |
1115 | #else | |
1116 | s[0] += s[1]; s[1] = rol32(s[1], 5); s[1] ^= s[0]; s[0] = rol32(s[0], 16); | |
1117 | s[2] += s[3]; s[3] = rol32(s[3], 8); s[3] ^= s[2]; | |
1118 | s[0] += s[3]; s[3] = rol32(s[3], 7); s[3] ^= s[0]; | |
1119 | s[2] += s[1]; s[1] = rol32(s[1], 13); s[1] ^= s[2]; s[2] = rol32(s[2], 16); | |
1120 | #endif | |
1121 | s[0] ^= v[i]; | |
1122 | } | |
92c653cf JD |
1123 | } |
1124 | ||
3191dd5a JD |
1125 | #ifdef CONFIG_SMP |
1126 | /* | |
1127 | * This function is called when the CPU has just come online, with | |
1128 | * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE. | |
1129 | */ | |
1130 | int random_online_cpu(unsigned int cpu) | |
1131 | { | |
1132 | /* | |
1133 | * During CPU shutdown and before CPU onlining, add_interrupt_ | |
1134 | * randomness() may schedule mix_interrupt_randomness(), and | |
1135 | * set the MIX_INFLIGHT flag. However, because the worker can | |
1136 | * be scheduled on a different CPU during this period, that | |
1137 | * flag will never be cleared. For that reason, we zero out | |
1138 | * the flag here, which runs just after workqueues are onlined | |
1139 | * for the CPU again. This also has the effect of setting the | |
1140 | * irq randomness count to zero so that new accumulated irqs | |
1141 | * are fresh. | |
1142 | */ | |
1143 | per_cpu_ptr(&irq_randomness, cpu)->count = 0; | |
1144 | return 0; | |
1145 | } | |
1146 | #endif | |
1147 | ||
58340f8e JD |
1148 | static void mix_interrupt_randomness(struct work_struct *work) |
1149 | { | |
1150 | struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix); | |
f5eab0e2 | 1151 | /* |
4b758eda JD |
1152 | * The size of the copied stack pool is explicitly 2 longs so that we |
1153 | * only ever ingest half of the siphash output each time, retaining | |
1154 | * the other half as the next "key" that carries over. The entropy is | |
1155 | * supposed to be sufficiently dispersed between bits so on average | |
1156 | * we don't wind up "losing" some. | |
f5eab0e2 | 1157 | */ |
4b758eda | 1158 | unsigned long pool[2]; |
58340f8e JD |
1159 | |
1160 | /* Check to see if we're running on the wrong CPU due to hotplug. */ | |
1161 | local_irq_disable(); | |
1162 | if (fast_pool != this_cpu_ptr(&irq_randomness)) { | |
1163 | local_irq_enable(); | |
58340f8e JD |
1164 | return; |
1165 | } | |
1166 | ||
1167 | /* | |
1168 | * Copy the pool to the stack so that the mixer always has a | |
1169 | * consistent view, before we reenable irqs again. | |
1170 | */ | |
f5eab0e2 | 1171 | memcpy(pool, fast_pool->pool, sizeof(pool)); |
3191dd5a | 1172 | fast_pool->count = 0; |
58340f8e JD |
1173 | fast_pool->last = jiffies; |
1174 | local_irq_enable(); | |
1175 | ||
5c3b747e | 1176 | mix_pool_bytes(pool, sizeof(pool)); |
e85c0fc1 | 1177 | credit_init_bits(1); |
c2a7de4f | 1178 | |
58340f8e JD |
1179 | memzero_explicit(pool, sizeof(pool)); |
1180 | } | |
1181 | ||
703f7066 | 1182 | void add_interrupt_randomness(int irq) |
1da177e4 | 1183 | { |
58340f8e | 1184 | enum { MIX_INFLIGHT = 1U << 31 }; |
4b758eda | 1185 | unsigned long entropy = random_get_entropy(); |
248045b8 JD |
1186 | struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness); |
1187 | struct pt_regs *regs = get_irq_regs(); | |
58340f8e | 1188 | unsigned int new_count; |
b2f408fe | 1189 | |
4b758eda JD |
1190 | fast_mix(fast_pool->pool, (unsigned long[2]){ |
1191 | entropy, | |
1192 | (regs ? instruction_pointer(regs) : _RET_IP_) ^ swab(irq) | |
1193 | }); | |
3191dd5a | 1194 | new_count = ++fast_pool->count; |
3060d6fe | 1195 | |
58340f8e | 1196 | if (new_count & MIX_INFLIGHT) |
1da177e4 LT |
1197 | return; |
1198 | ||
5c3b747e | 1199 | if (new_count < 64 && !time_is_before_jiffies(fast_pool->last + HZ)) |
91fcb532 | 1200 | return; |
83664a69 | 1201 | |
58340f8e JD |
1202 | if (unlikely(!fast_pool->mix.func)) |
1203 | INIT_WORK(&fast_pool->mix, mix_interrupt_randomness); | |
3191dd5a | 1204 | fast_pool->count |= MIX_INFLIGHT; |
58340f8e | 1205 | queue_work_on(raw_smp_processor_id(), system_highpri_wq, &fast_pool->mix); |
1da177e4 | 1206 | } |
4b44f2d1 | 1207 | EXPORT_SYMBOL_GPL(add_interrupt_randomness); |
1da177e4 | 1208 | |
a4b5c26b JD |
1209 | /* There is one of these per entropy source */ |
1210 | struct timer_rand_state { | |
1211 | unsigned long last_time; | |
1212 | long last_delta, last_delta2; | |
1213 | }; | |
1214 | ||
1215 | /* | |
1216 | * This function adds entropy to the entropy "pool" by using timing | |
1217 | * delays. It uses the timer_rand_state structure to make an estimate | |
1218 | * of how many bits of entropy this call has added to the pool. | |
1219 | * | |
1220 | * The number "num" is also added to the pool - it should somehow describe | |
1221 | * the type of event which just happened. This is currently 0-255 for | |
1222 | * keyboard scan codes, and 256 upwards for interrupts. | |
1223 | */ | |
1224 | static void add_timer_randomness(struct timer_rand_state *state, unsigned int num) | |
1225 | { | |
1226 | unsigned long entropy = random_get_entropy(), now = jiffies, flags; | |
1227 | long delta, delta2, delta3; | |
1228 | ||
1229 | spin_lock_irqsave(&input_pool.lock, flags); | |
1230 | _mix_pool_bytes(&entropy, sizeof(entropy)); | |
1231 | _mix_pool_bytes(&num, sizeof(num)); | |
1232 | spin_unlock_irqrestore(&input_pool.lock, flags); | |
1233 | ||
1234 | if (crng_ready()) | |
1235 | return; | |
1236 | ||
1237 | /* | |
1238 | * Calculate number of bits of randomness we probably added. | |
1239 | * We take into account the first, second and third-order deltas | |
1240 | * in order to make our estimate. | |
1241 | */ | |
1242 | delta = now - READ_ONCE(state->last_time); | |
1243 | WRITE_ONCE(state->last_time, now); | |
1244 | ||
1245 | delta2 = delta - READ_ONCE(state->last_delta); | |
1246 | WRITE_ONCE(state->last_delta, delta); | |
1247 | ||
1248 | delta3 = delta2 - READ_ONCE(state->last_delta2); | |
1249 | WRITE_ONCE(state->last_delta2, delta2); | |
1250 | ||
1251 | if (delta < 0) | |
1252 | delta = -delta; | |
1253 | if (delta2 < 0) | |
1254 | delta2 = -delta2; | |
1255 | if (delta3 < 0) | |
1256 | delta3 = -delta3; | |
1257 | if (delta > delta2) | |
1258 | delta = delta2; | |
1259 | if (delta > delta3) | |
1260 | delta = delta3; | |
1261 | ||
1262 | /* | |
1263 | * delta is now minimum absolute delta. | |
1264 | * Round down by 1 bit on general principles, | |
1265 | * and limit entropy estimate to 12 bits. | |
1266 | */ | |
1267 | credit_init_bits(min_t(unsigned int, fls(delta >> 1), 11)); | |
1268 | } | |
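A worked example of the estimate above, with hypothetical jiffies deltas:

```c
/*
 * delta = 100, delta2 = 30, delta3 = -20  ->  minimum absolute delta = 20,
 * so the credit is min(fls(20 >> 1), 11) = min(fls(10), 11) = 4 bits.
 */
```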
1269 | ||
1270 | void add_input_randomness(unsigned int type, unsigned int code, | |
1271 | unsigned int value) | |
1272 | { | |
1273 | static unsigned char last_value; | |
1274 | static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES }; | |
1275 | ||
1276 | /* Ignore autorepeat and the like. */ | |
1277 | if (value == last_value) | |
1278 | return; | |
1279 | ||
1280 | last_value = value; | |
1281 | add_timer_randomness(&input_timer_state, | |
1282 | (type << 4) ^ code ^ (code >> 4) ^ value); | |
1283 | } | |
1284 | EXPORT_SYMBOL_GPL(add_input_randomness); | |
1285 | ||
1286 | #ifdef CONFIG_BLOCK | |
1287 | void add_disk_randomness(struct gendisk *disk) | |
1288 | { | |
1289 | if (!disk || !disk->random) | |
1290 | return; | |
1291 | /* First major is 1, so we get >= 0x200 here. */ | |
1292 | add_timer_randomness(disk->random, 0x100 + disk_devt(disk)); | |
1293 | } | |
1294 | EXPORT_SYMBOL_GPL(add_disk_randomness); | |
1295 | ||
1296 | void rand_initialize_disk(struct gendisk *disk) | |
1297 | { | |
1298 | struct timer_rand_state *state; | |
1299 | ||
1300 | /* | |
1301 | * If kzalloc returns null, we just won't use that entropy | |
1302 | * source. | |
1303 | */ | |
1304 | state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL); | |
1305 | if (state) { | |
1306 | state->last_time = INITIAL_JIFFIES; | |
1307 | disk->random = state; | |
1308 | } | |
1309 | } | |
1310 | #endif | |
1311 | ||
78c768e6 JD |
1312 | struct entropy_timer_state { |
1313 | unsigned long entropy; | |
1314 | struct timer_list timer; | |
1315 | unsigned int samples, samples_per_bit; | |
1316 | }; | |
1317 | ||
50ee7529 LT |
1318 | /* |
1319 | * Each time the timer fires, we expect that we got an unpredictable | |
1320 | * jump in the cycle counter. Even if the timer is running on another | |
1321 | * CPU, the timer activity will be touching the stack of the CPU that is | |
1322 | * generating entropy. |
1323 | * | |
1324 | * Note that we don't re-arm the timer in the timer itself - we are | |
1325 | * happy to be scheduled away, since that just makes the load more | |
1326 | * complex, but we do not want the timer to keep ticking unless the | |
1327 | * entropy loop is running. | |
1328 | * | |
1329 | * So the re-arming always happens in the entropy loop itself. | |
1330 | */ | |
78c768e6 | 1331 | static void entropy_timer(struct timer_list *timer) |
50ee7529 | 1332 | { |
78c768e6 JD |
1333 | struct entropy_timer_state *state = container_of(timer, struct entropy_timer_state, timer); |
1334 | ||
1335 | if (++state->samples == state->samples_per_bit) { | |
e85c0fc1 | 1336 | credit_init_bits(1); |
78c768e6 JD |
1337 | state->samples = 0; |
1338 | } | |
50ee7529 LT |
1339 | } |
1340 | ||
1341 | /* | |
1342 | * If we have an actual cycle counter, see if we can | |
1343 | * generate enough entropy with timing noise | |
1344 | */ | |
1345 | static void try_to_generate_entropy(void) | |
1346 | { | |
78c768e6 JD |
1347 | enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = 32 }; |
1348 | struct entropy_timer_state stack; | |
1349 | unsigned int i, num_different = 0; | |
1350 | unsigned long last = random_get_entropy(); | |
50ee7529 | 1351 | |
78c768e6 JD |
1352 | for (i = 0; i < NUM_TRIAL_SAMPLES - 1; ++i) { |
1353 | stack.entropy = random_get_entropy(); | |
1354 | if (stack.entropy != last) | |
1355 | ++num_different; | |
1356 | last = stack.entropy; | |
1357 | } | |
1358 | stack.samples_per_bit = DIV_ROUND_UP(NUM_TRIAL_SAMPLES, num_different + 1); | |
1359 | if (stack.samples_per_bit > MAX_SAMPLES_PER_BIT) | |
50ee7529 LT |
1360 | return; |
1361 | ||
78c768e6 | 1362 | stack.samples = 0; |
50ee7529 | 1363 | timer_setup_on_stack(&stack.timer, entropy_timer, 0); |
3e504d20 | 1364 | while (!crng_ready() && !signal_pending(current)) { |
50ee7529 | 1365 | if (!timer_pending(&stack.timer)) |
248045b8 | 1366 | mod_timer(&stack.timer, jiffies + 1); |
4b758eda | 1367 | mix_pool_bytes(&stack.entropy, sizeof(stack.entropy)); |
50ee7529 | 1368 | schedule(); |
4b758eda | 1369 | stack.entropy = random_get_entropy(); |
50ee7529 LT |
1370 | } |
1371 | ||
1372 | del_timer_sync(&stack.timer); | |
1373 | destroy_timer_on_stack(&stack.timer); | |
4b758eda | 1374 | mix_pool_bytes(&stack.entropy, sizeof(stack.entropy)); |
50ee7529 LT |
1375 | } |
1376 | ||
a6adf8e7 JD |
1377 | |
1378 | /********************************************************************** | |
1379 | * | |
1380 | * Userspace reader/writer interfaces. | |
1381 | * | |
1382 | * getrandom(2) is the primary modern interface into the RNG and should | |
1383 | * be used in preference to anything else. | |
1384 | * | |
0313bc27 LT |
1385 | * Reading from /dev/random has the same functionality as calling |
1386 | * getrandom(2) with flags=0. In earlier versions, however, it had | |
1387 | * vastly different semantics and should therefore be avoided, to | |
1388 | * prevent backwards compatibility issues. | |
1389 | * | |
1390 | * Reading from /dev/urandom has the same functionality as calling | |
1391 | * getrandom(2) with flags=GRND_INSECURE. Because it does not block | |
1392 | * waiting for the RNG to be ready, it should not be used. | |
a6adf8e7 JD |
1393 | * |
1394 | * Writing to either /dev/random or /dev/urandom adds entropy to | |
1395 | * the input pool but does not credit it. | |
1396 | * | |
0313bc27 LT |
1397 | * Polling on /dev/random indicates when the RNG is initialized, on |
1398 | * the read side, and when it wants new entropy, on the write side. | |
a6adf8e7 JD |
1399 | * |
1400 | * Both /dev/random and /dev/urandom have the same set of ioctls for | |
1401 | * adding entropy, getting the entropy count, zeroing the count, and | |
1402 | * reseeding the crng. | |
1403 | * | |
1404 | **********************************************************************/ | |
1405 | ||
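A minimal userspace sketch of the getrandom(2) call recommended above, not part of this file; it assumes a glibc recent enough to ship the <sys/random.h> wrapper, and the 32-byte request size is an arbitrary choice:

/* Sketch: fetch 32 bytes from the kernel RNG via getrandom(2). */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/random.h>

int main(void)
{
	unsigned char buf[32];
	ssize_t ret;

	/* flags=0: block only until the RNG is initialized, then fill the buffer. */
	ret = getrandom(buf, sizeof(buf), 0);
	if (ret != sizeof(buf)) {
		fprintf(stderr, "getrandom: %s\n", ret < 0 ? strerror(errno) : "short read");
		return 1;
	}
	printf("got %zd random bytes\n", ret);
	return 0;
}

With flags=0 the call blocks only until the RNG is initialized; passing GRND_NONBLOCK instead yields -EAGAIN during that window, mirroring the crng_ready() check in the syscall body below.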
1406 | SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int, | |
1407 | flags) | |
1da177e4 | 1408 | { |
a6adf8e7 JD |
1409 | if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE)) |
1410 | return -EINVAL; | |
301f0595 | 1411 | |
a6adf8e7 JD |
1412 | /* |
1413 | * Requesting insecure and blocking randomness at the same time makes | |
1414 | * no sense. | |
1415 | */ | |
1416 | if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM)) | |
1417 | return -EINVAL; | |
c6f1deb1 | 1418 | |
a6adf8e7 JD |
1419 | if (count > INT_MAX) |
1420 | count = INT_MAX; | |
1da177e4 | 1421 | |
a6adf8e7 JD |
1422 | if (!(flags & GRND_INSECURE) && !crng_ready()) { |
1423 | int ret; | |
30c08efe | 1424 | |
a6adf8e7 JD |
1425 | if (flags & GRND_NONBLOCK) |
1426 | return -EAGAIN; | |
1427 | ret = wait_for_random_bytes(); | |
1428 | if (unlikely(ret)) | |
1429 | return ret; | |
1430 | } | |
1431 | return get_random_bytes_user(buf, count); | |
30c08efe AL |
1432 | } |
1433 | ||
248045b8 | 1434 | static __poll_t random_poll(struct file *file, poll_table *wait) |
1da177e4 | 1435 | { |
30c08efe | 1436 | poll_wait(file, &crng_init_wait, wait); |
e85c0fc1 | 1437 | return crng_ready() ? EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM; |
1da177e4 LT |
1438 | } |
1439 | ||
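A minimal userspace sketch of the read-side poll semantics implemented by random_poll() above, not part of this file:

/* Sketch: block until the kernel RNG reports itself initialized. */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct pollfd pfd = { .events = POLLIN };

	pfd.fd = open("/dev/random", O_RDONLY);
	if (pfd.fd < 0)
		return 1;

	/* random_poll() reports POLLIN only once crng_ready() is true. */
	if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN))
		printf("RNG initialized\n");

	close(pfd.fd);
	return 0;
}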
04ec96b7 | 1440 | static int write_pool(const char __user *ubuf, size_t count) |
1da177e4 | 1441 | { |
04ec96b7 | 1442 | size_t len; |
7b5164fb | 1443 | int ret = 0; |
04ec96b7 | 1444 | u8 block[BLAKE2S_BLOCK_SIZE]; |
1da177e4 | 1445 | |
04ec96b7 JD |
1446 | while (count) { |
1447 | len = min(count, sizeof(block)); | |
7b5164fb JD |
1448 | if (copy_from_user(block, ubuf, len)) { |
1449 | ret = -EFAULT; | |
1450 | goto out; | |
1451 | } | |
04ec96b7 JD |
1452 | count -= len; |
1453 | ubuf += len; | |
1454 | mix_pool_bytes(block, len); | |
91f3f1e3 | 1455 | cond_resched(); |
1da177e4 | 1456 | } |
7f397dcd | 1457 | |
7b5164fb JD |
1458 | out: |
1459 | memzero_explicit(block, sizeof(block)); | |
1460 | return ret; | |
7f397dcd MM |
1461 | } |
1462 | ||
90b75ee5 MM |
1463 | static ssize_t random_write(struct file *file, const char __user *buffer, |
1464 | size_t count, loff_t *ppos) | |
7f397dcd | 1465 | { |
04ec96b7 | 1466 | int ret; |
7f397dcd | 1467 | |
90ed1e67 | 1468 | ret = write_pool(buffer, count); |
7f397dcd MM |
1469 | if (ret) |
1470 | return ret; | |
1471 | ||
7f397dcd | 1472 | return (ssize_t)count; |
1da177e4 LT |
1473 | } |
1474 | ||
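A minimal userspace sketch of the uncredited write path above, not part of this file; the zeroed 32-byte buffer stands in for real saved-seed material. The bytes reach write_pool() and are mixed into the input pool, but no entropy is credited, so this alone never marks the RNG ready:

/* Sketch: re-inject seed material without crediting any entropy. */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	unsigned char seed[32] = { 0 }; /* placeholder for bytes saved at last shutdown */
	int fd = open("/dev/urandom", O_WRONLY);

	if (fd < 0)
		return 1;
	/* Mixed into the input pool via write_pool(); no bits are credited. */
	if (write(fd, seed, sizeof(seed)) != sizeof(seed)) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}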
0313bc27 LT |
1475 | static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes, |
1476 | loff_t *ppos) | |
1477 | { | |
1478 | static int maxwarn = 10; | |
1479 | ||
48bff105 JD |
1480 | /* |
1481 | * Opportunistically attempt to initialize the RNG on platforms that | |
1482 | * have fast cycle counters, but don't (for now) require it to succeed. | |
1483 | */ | |
1484 | if (!crng_ready()) | |
1485 | try_to_generate_entropy(); | |
1486 | ||
0313bc27 LT |
1487 | if (!crng_ready() && maxwarn > 0) { |
1488 | maxwarn--; | |
1489 | if (__ratelimit(&urandom_warning)) | |
1490 | pr_notice("%s: uninitialized urandom read (%zd bytes read)\n", | |
1491 | current->comm, nbytes); | |
1492 | } | |
1493 | ||
1494 | return get_random_bytes_user(buf, nbytes); | |
1495 | } | |
1496 | ||
a6adf8e7 JD |
1497 | static ssize_t random_read(struct file *file, char __user *buf, size_t nbytes, |
1498 | loff_t *ppos) | |
1499 | { | |
1500 | int ret; | |
1501 | ||
1502 | ret = wait_for_random_bytes(); | |
1503 | if (ret != 0) | |
1504 | return ret; | |
1505 | return get_random_bytes_user(buf, nbytes); | |
1506 | } | |
1507 | ||
43ae4860 | 1508 | static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) |
1da177e4 LT |
1509 | { |
1510 | int size, ent_count; | |
1511 | int __user *p = (int __user *)arg; | |
1512 | int retval; | |
1513 | ||
1514 | switch (cmd) { | |
1515 | case RNDGETENTCNT: | |
a6adf8e7 | 1516 | /* Inherently racy, no point locking. */ |
e85c0fc1 | 1517 | if (put_user(input_pool.init_bits, p)) |
1da177e4 LT |
1518 | return -EFAULT; |
1519 | return 0; | |
1520 | case RNDADDTOENTCNT: | |
1521 | if (!capable(CAP_SYS_ADMIN)) | |
1522 | return -EPERM; | |
1523 | if (get_user(ent_count, p)) | |
1524 | return -EFAULT; | |
a49c010e JD |
1525 | if (ent_count < 0) |
1526 | return -EINVAL; | |
e85c0fc1 | 1527 | credit_init_bits(ent_count); |
a49c010e | 1528 | return 0; |
1da177e4 LT |
1529 | case RNDADDENTROPY: |
1530 | if (!capable(CAP_SYS_ADMIN)) | |
1531 | return -EPERM; | |
1532 | if (get_user(ent_count, p++)) | |
1533 | return -EFAULT; | |
1534 | if (ent_count < 0) | |
1535 | return -EINVAL; | |
1536 | if (get_user(size, p++)) | |
1537 | return -EFAULT; | |
90ed1e67 | 1538 | retval = write_pool((const char __user *)p, size); |
1da177e4 LT |
1539 | if (retval < 0) |
1540 | return retval; | |
e85c0fc1 | 1541 | credit_init_bits(ent_count); |
a49c010e | 1542 | return 0; |
1da177e4 LT |
1543 | case RNDZAPENTCNT: |
1544 | case RNDCLEARPOOL: | |
e85c0fc1 | 1545 | /* No longer has any effect. */ |
1da177e4 LT |
1546 | if (!capable(CAP_SYS_ADMIN)) |
1547 | return -EPERM; | |
1da177e4 | 1548 | return 0; |
d848e5f8 TT |
1549 | case RNDRESEEDCRNG: |
1550 | if (!capable(CAP_SYS_ADMIN)) | |
1551 | return -EPERM; | |
a96cfe2d | 1552 | if (!crng_ready()) |
d848e5f8 | 1553 | return -ENODATA; |
e85c0fc1 | 1554 | crng_reseed(); |
d848e5f8 | 1555 | return 0; |
1da177e4 LT |
1556 | default: |
1557 | return -EINVAL; | |
1558 | } | |
1559 | } | |
1560 | ||
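The RNDADDENTROPY case above consumes a struct rand_pool_info from <linux/random.h>: an entropy credit in bits, a payload length in bytes, then the payload itself. A minimal privileged userspace sketch, not part of this file; the "seed.bin" name, the 64-byte payload, and the 64-bit credit are arbitrary assumptions:

/* Sketch: mix in seed material and credit it, via the RNDADDENTROPY ioctl. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/random.h>

int main(void)
{
	struct rand_pool_info *req;
	int fd, seed_fd;

	/* Header plus room for 64 bytes of seed material. */
	req = malloc(sizeof(*req) + 64);
	if (!req)
		return 1;

	seed_fd = open("seed.bin", O_RDONLY); /* hypothetical saved-seed file */
	if (seed_fd < 0 || read(seed_fd, req->buf, 64) != 64)
		return 1;
	close(seed_fd);

	req->entropy_count = 64; /* bits handed to credit_init_bits() */
	req->buf_size = 64;      /* bytes of req->buf handed to write_pool() */

	fd = open("/dev/random", O_WRONLY);
	if (fd < 0 || ioctl(fd, RNDADDENTROPY, req) < 0) {
		perror("RNDADDENTROPY");
		return 1;
	}
	close(fd);
	free(req);
	return 0;
}

Unlike a plain write to the device node, this both mixes the payload and credits the stated number of init bits, which is why it requires CAP_SYS_ADMIN.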
9a6f70bb JD |
1561 | static int random_fasync(int fd, struct file *filp, int on) |
1562 | { | |
1563 | return fasync_helper(fd, filp, on, &fasync); | |
1564 | } | |
1565 | ||
2b8693c0 | 1566 | const struct file_operations random_fops = { |
248045b8 | 1567 | .read = random_read, |
1da177e4 | 1568 | .write = random_write, |
248045b8 | 1569 | .poll = random_poll, |
43ae4860 | 1570 | .unlocked_ioctl = random_ioctl, |
507e4e2b | 1571 | .compat_ioctl = compat_ptr_ioctl, |
9a6f70bb | 1572 | .fasync = random_fasync, |
6038f373 | 1573 | .llseek = noop_llseek, |
1da177e4 LT |
1574 | }; |
1575 | ||
0313bc27 LT |
1576 | const struct file_operations urandom_fops = { |
1577 | .read = urandom_read, | |
1578 | .write = random_write, | |
1579 | .unlocked_ioctl = random_ioctl, | |
1580 | .compat_ioctl = compat_ptr_ioctl, | |
1581 | .fasync = random_fasync, | |
1582 | .llseek = noop_llseek, | |
1583 | }; | |
1584 | ||
0deff3c4 | 1585 | |
1da177e4 LT |
1586 | /******************************************************************** |
1587 | * | |
0deff3c4 JD |
1588 | * Sysctl interface. |
1589 | * | |
1590 | * These are partly unused legacy knobs with dummy values to not break | |
1591 | * userspace and partly still useful things. They are usually accessible | |
1592 | * in /proc/sys/kernel/random/ and are as follows: | |
1593 | * | |
1594 | * - boot_id - a UUID representing the current boot. | |
1595 | * | |
1596 | * - uuid - a random UUID, different each time the file is read. | |
1597 | * | |
1598 | * - poolsize - the number of bits of entropy that the input pool can | |
1599 | * hold, tied to the POOL_BITS constant. | |
1600 | * | |
1601 | * - entropy_avail - the number of bits of entropy currently in the | |
1602 | * input pool. Always <= poolsize. | |
1603 | * | |
1604 | * - write_wakeup_threshold - the amount of entropy in the input pool | |
1605 | * below which write polls to /dev/random will unblock, requesting | |
e85c0fc1 | 1606 | * more entropy, tied to the POOL_INIT_BITS constant. It is writable |
0deff3c4 JD |
1607 | * to avoid breaking old userspaces, but writing to it does not |
1608 | * change any behavior of the RNG. | |
1609 | * | |
d0efdf35 | 1610 | * - urandom_min_reseed_secs - fixed to CRNG_RESEED_INTERVAL, expressed in seconds. |
0deff3c4 JD |
1611 | * It is writable to avoid breaking old userspaces, but writing |
1612 | * to it does not change any behavior of the RNG. | |
1da177e4 LT |
1613 | * |
1614 | ********************************************************************/ | |
1615 | ||
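A minimal userspace sketch of reading two of the knobs listed above through procfs, not part of this file (read_knob() is an illustrative helper):

/* Sketch: report the current entropy estimate and the pool size. */
#include <stdio.h>

static int read_knob(const char *path)
{
	FILE *f = fopen(path, "r");
	int val = -1;

	if (f) {
		if (fscanf(f, "%d", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

int main(void)
{
	/* entropy_avail tracks input_pool.init_bits and never exceeds poolsize. */
	printf("entropy_avail=%d poolsize=%d\n",
	       read_knob("/proc/sys/kernel/random/entropy_avail"),
	       read_knob("/proc/sys/kernel/random/poolsize"));
	return 0;
}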
1616 | #ifdef CONFIG_SYSCTL | |
1617 | ||
1618 | #include <linux/sysctl.h> | |
1619 | ||
d0efdf35 | 1620 | static int sysctl_random_min_urandom_seed = CRNG_RESEED_INTERVAL / HZ; |
e85c0fc1 | 1621 | static int sysctl_random_write_wakeup_bits = POOL_INIT_BITS; |
489c7fc4 | 1622 | static int sysctl_poolsize = POOL_BITS; |
64276a99 | 1623 | static u8 sysctl_bootid[UUID_SIZE]; |
1da177e4 LT |
1624 | |
1625 | /* | |
f22052b2 | 1626 | * This function is used to return both the bootid UUID and a random |
64276a99 | 1627 | * UUID. The difference is in whether table->data is NULL; if it is, |
1da177e4 | 1628 | * then a new UUID is generated and returned to the user. |
1da177e4 | 1629 | */ |
248045b8 JD |
1630 | static int proc_do_uuid(struct ctl_table *table, int write, void *buffer, |
1631 | size_t *lenp, loff_t *ppos) | |
1da177e4 | 1632 | { |
64276a99 JD |
1633 | u8 tmp_uuid[UUID_SIZE], *uuid; |
1634 | char uuid_string[UUID_STRING_LEN + 1]; | |
1635 | struct ctl_table fake_table = { | |
1636 | .data = uuid_string, | |
1637 | .maxlen = UUID_STRING_LEN | |
1638 | }; | |
1639 | ||
1640 | if (write) | |
1641 | return -EPERM; | |
1da177e4 LT |
1642 | |
1643 | uuid = table->data; | |
1644 | if (!uuid) { | |
1645 | uuid = tmp_uuid; | |
1da177e4 | 1646 | generate_random_uuid(uuid); |
44e4360f MD |
1647 | } else { |
1648 | static DEFINE_SPINLOCK(bootid_spinlock); | |
1649 | ||
1650 | spin_lock(&bootid_spinlock); | |
1651 | if (!uuid[8]) | |
1652 | generate_random_uuid(uuid); | |
1653 | spin_unlock(&bootid_spinlock); | |
1654 | } | |
1da177e4 | 1655 | |
64276a99 JD |
1656 | snprintf(uuid_string, sizeof(uuid_string), "%pU", uuid); |
1657 | return proc_dostring(&fake_table, 0, buffer, lenp, ppos); | |
1da177e4 LT |
1658 | } |
1659 | ||
77553cf8 JD |
1660 | /* The same as proc_dointvec, but writes don't change anything. */ |
1661 | static int proc_do_rointvec(struct ctl_table *table, int write, void *buffer, | |
1662 | size_t *lenp, loff_t *ppos) | |
1663 | { | |
1664 | return write ? 0 : proc_dointvec(table, 0, buffer, lenp, ppos); | |
1665 | } | |
1666 | ||
5475e8f0 | 1667 | static struct ctl_table random_table[] = { |
1da177e4 | 1668 | { |
1da177e4 LT |
1669 | .procname = "poolsize", |
1670 | .data = &sysctl_poolsize, | |
1671 | .maxlen = sizeof(int), | |
1672 | .mode = 0444, | |
6d456111 | 1673 | .proc_handler = proc_dointvec, |
1da177e4 LT |
1674 | }, |
1675 | { | |
1da177e4 | 1676 | .procname = "entropy_avail", |
e85c0fc1 | 1677 | .data = &input_pool.init_bits, |
1da177e4 LT |
1678 | .maxlen = sizeof(int), |
1679 | .mode = 0444, | |
c5704490 | 1680 | .proc_handler = proc_dointvec, |
1da177e4 | 1681 | }, |
1da177e4 | 1682 | { |
1da177e4 | 1683 | .procname = "write_wakeup_threshold", |
0deff3c4 | 1684 | .data = &sysctl_random_write_wakeup_bits, |
1da177e4 LT |
1685 | .maxlen = sizeof(int), |
1686 | .mode = 0644, | |
77553cf8 | 1687 | .proc_handler = proc_do_rointvec, |
1da177e4 | 1688 | }, |
f5c2742c TT |
1689 | { |
1690 | .procname = "urandom_min_reseed_secs", | |
0deff3c4 | 1691 | .data = &sysctl_random_min_urandom_seed, |
f5c2742c TT |
1692 | .maxlen = sizeof(int), |
1693 | .mode = 0644, | |
77553cf8 | 1694 | .proc_handler = proc_do_rointvec, |
f5c2742c | 1695 | }, |
1da177e4 | 1696 | { |
1da177e4 LT |
1697 | .procname = "boot_id", |
1698 | .data = &sysctl_bootid, | |
1da177e4 | 1699 | .mode = 0444, |
6d456111 | 1700 | .proc_handler = proc_do_uuid, |
1da177e4 LT |
1701 | }, |
1702 | { | |
1da177e4 | 1703 | .procname = "uuid", |
1da177e4 | 1704 | .mode = 0444, |
6d456111 | 1705 | .proc_handler = proc_do_uuid, |
1da177e4 | 1706 | }, |
894d2491 | 1707 | { } |
1da177e4 | 1708 | }; |
5475e8f0 XN |
1709 | |
1710 | /* | |
1711 | * rand_initialize() is called before sysctl_init(), | |
1712 | * so we cannot call register_sysctl_init() in rand_initialize(). |
1713 | */ | |
1714 | static int __init random_sysctls_init(void) | |
1715 | { | |
1716 | register_sysctl_init("kernel/random", random_table); | |
1717 | return 0; | |
1718 | } | |
1719 | device_initcall(random_sysctls_init); | |
0deff3c4 | 1720 | #endif |