Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
6071a6c0 | 2 | |
1da177e4 LT |
3 | #ifndef _LINUX_RANDOM_H |
4 | #define _LINUX_RANDOM_H | |
5 | ||
253d3194 MR |
6 | #include <linux/bug.h> |
7 | #include <linux/kernel.h> | |
205a525c | 8 | #include <linux/list.h> |
897ece56 DB |
9 | #include <linux/once.h> |
10 | ||
607ca46e | 11 | #include <uapi/linux/random.h> |
1da177e4 | 12 | |
5acd3548 | 13 | struct notifier_block; |
205a525c | 14 | |
a1940263 | 15 | void add_device_randomness(const void *buf, size_t len); |
39e0f991 | 16 | void __init add_bootloader_randomness(const void *buf, size_t len); |
7782cfec JD |
17 | void add_input_randomness(unsigned int type, unsigned int code, |
18 | unsigned int value) __latent_entropy; | |
19 | void add_interrupt_randomness(int irq) __latent_entropy; | |
db516da9 | 20 | void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy, bool sleep_after); |
38addce8 | 21 | |
/*
 * Mix the kernel image's compile-time "latent entropy" into the RNG.
 *
 * With the latent_entropy GCC plugin enabled (and not under sparse, which
 * cannot parse the plugin's output), feed the current value of the global
 * 'latent_entropy' accumulator to add_device_randomness().
 */
static inline void add_latent_entropy(void)
{
#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
	add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
#else
	/*
	 * No plugin: still issue a zero-length call.
	 * NOTE(review): presumably add_device_randomness() also mixes in
	 * per-call timing, so this is not a no-op -- confirm against its
	 * implementation in drivers/char/random.c.
	 */
	add_device_randomness(NULL, 0);
#endif
}
38addce8 | 30 | |
#if IS_ENABLED(CONFIG_VMGENID)
/* VM-generation-ID support: reseed hooks fired when a VM fork is detected. */
void add_vmfork_randomness(const void *unique_vm_id, size_t len);
int register_random_vmfork_notifier(struct notifier_block *nb);
int unregister_random_vmfork_notifier(struct notifier_block *nb);
#else
/* Without VMGENID there is no vmfork event source; registration is a no-op. */
static inline int register_random_vmfork_notifier(struct notifier_block *nb) { return 0; }
static inline int unregister_random_vmfork_notifier(struct notifier_block *nb) { return 0; }
#endif
1da177e4 | 39 | |
a1940263 | 40 | void get_random_bytes(void *buf, size_t len); |
585cd5fe JD |
41 | u8 get_random_u8(void); |
42 | u16 get_random_u16(void); | |
c440408c JD |
43 | u32 get_random_u32(void); |
44 | u64 get_random_u64(void); | |
/*
 * Return a random value spanning the full width of 'unsigned long',
 * selecting the 64-bit or 32-bit generator at compile time.
 */
static inline unsigned long get_random_long(void)
{
#if BITS_PER_LONG == 64
	return get_random_u64();
#else
	return get_random_u32();
#endif
}
53 | ||
/* Out-of-line slow path used when 'ceil' is not a compile-time constant. */
u32 __get_random_u32_below(u32 ceil);

/*
 * Returns a random integer in the interval [0, ceil), with uniform
 * distribution, suitable for all uses. Fastest when ceil is a constant, but
 * still fast for variable ceil as well.
 */
static inline u32 get_random_u32_below(u32 ceil)
{
	if (!__builtin_constant_p(ceil))
		return __get_random_u32_below(ceil);

	/*
	 * For the fast path, below, all operations on ceil are precomputed by
	 * the compiler, so this incurs no overhead for checking pow2, doing
	 * divisions, or branching based on integer size. The resultant
	 * algorithm does traditional reciprocal multiplication (typically
	 * optimized by the compiler into shifts and adds), rejecting samples
	 * whose lower half would indicate a range indivisible by ceil.
	 */
	BUILD_BUG_ON_MSG(!ceil, "get_random_u32_below() must take ceil > 0");
	if (ceil <= 1)
		return 0;
	for (;;) {
		/* Draw from the narrowest generator that covers [0, ceil). */
		if (ceil <= 1U << 8) {
			u32 mult = ceil * get_random_u8();
			/* Reject when the low byte lands in the biased remainder zone. */
			if (likely(is_power_of_2(ceil) || (u8)mult >= (1U << 8) % ceil))
				return mult >> 8;
		} else if (ceil <= 1U << 16) {
			u32 mult = ceil * get_random_u16();
			if (likely(is_power_of_2(ceil) || (u16)mult >= (1U << 16) % ceil))
				return mult >> 16;
		} else {
			u64 mult = (u64)ceil * get_random_u32();
			/* -ceil % ceil computes 2^32 % ceil without overflowing u32. */
			if (likely(is_power_of_2(ceil) || (u32)mult >= -ceil % ceil))
				return mult >> 32;
		}
	}
}
93 | ||
7f576b25 JD |
94 | /* |
95 | * Returns a random integer in the interval (floor, U32_MAX], with uniform | |
96 | * distribution, suitable for all uses. Fastest when floor is a constant, but | |
97 | * still fast for variable floor as well. | |
98 | */ | |
99 | static inline u32 get_random_u32_above(u32 floor) | |
100 | { | |
101 | BUILD_BUG_ON_MSG(__builtin_constant_p(floor) && floor == U32_MAX, | |
102 | "get_random_u32_above() must take floor < U32_MAX"); | |
103 | return floor + 1 + get_random_u32_below(U32_MAX - floor); | |
104 | } | |
105 | ||
106 | /* | |
107 | * Returns a random integer in the interval [floor, ceil], with uniform | |
108 | * distribution, suitable for all uses. Fastest when floor and ceil are | |
109 | * constant, but still fast for variable floor and ceil as well. | |
110 | */ | |
111 | static inline u32 get_random_u32_inclusive(u32 floor, u32 ceil) | |
112 | { | |
113 | BUILD_BUG_ON_MSG(__builtin_constant_p(floor) && __builtin_constant_p(ceil) && | |
114 | (floor > ceil || ceil - floor == U32_MAX), | |
115 | "get_random_u32_inclusive() must take floor <= ceil"); | |
116 | return floor + get_random_u32_below(ceil - floor + 1); | |
117 | } | |
118 | ||
f6238499 JD |
119 | void __init random_init_early(const char *command_line); |
120 | void __init random_init(void); | |
7782cfec JD |
121 | bool rng_is_initialized(void); |
122 | int wait_for_random_bytes(void); | |
7782cfec | 123 | |
da9ba564 JD |
124 | /* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes). |
125 | * Returns the result of the call to wait_for_random_bytes. */ | |
04ec96b7 | 126 | static inline int get_random_bytes_wait(void *buf, size_t nbytes) |
da9ba564 JD |
127 | { |
128 | int ret = wait_for_random_bytes(); | |
da9ba564 | 129 | get_random_bytes(buf, nbytes); |
25e3fca4 | 130 | return ret; |
da9ba564 JD |
131 | } |
132 | ||
7c3a8a1d JD |
133 | #define declare_get_random_var_wait(name, ret_type) \ |
134 | static inline int get_random_ ## name ## _wait(ret_type *out) { \ | |
da9ba564 JD |
135 | int ret = wait_for_random_bytes(); \ |
136 | if (unlikely(ret)) \ | |
137 | return ret; \ | |
7c3a8a1d | 138 | *out = get_random_ ## name(); \ |
da9ba564 JD |
139 | return 0; \ |
140 | } | |
a890d1c6 JD |
141 | declare_get_random_var_wait(u8, u8) |
142 | declare_get_random_var_wait(u16, u16) | |
7c3a8a1d JD |
143 | declare_get_random_var_wait(u32, u32) |
144 | declare_get_random_var_wait(u64, u32) | |
7c3a8a1d | 145 | declare_get_random_var_wait(long, unsigned long) |
da9ba564 JD |
146 | #undef declare_get_random_var |
147 | ||
5960164f | 148 | /* |
c0842fbc LT |
149 | * This is designed to be standalone for just prandom |
150 | * users, but for now we include it from <linux/random.h> | |
151 | * for legacy reasons. | |
5960164f | 152 | */ |
c0842fbc | 153 | #include <linux/prandom.h> |
5960164f | 154 | |
9592eef7 | 155 | #include <asm/archrandom.h> |
63d77173 | 156 | |
3191dd5a | 157 | #ifdef CONFIG_SMP |
7782cfec JD |
158 | int random_prepare_cpu(unsigned int cpu); |
159 | int random_online_cpu(unsigned int cpu); | |
160 | #endif | |
161 | ||
162 | #ifndef MODULE | |
163 | extern const struct file_operations random_fops, urandom_fops; | |
3191dd5a JD |
164 | #endif |
165 | ||
1da177e4 | 166 | #endif /* _LINUX_RANDOM_H */ |