/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/fault-inject-usercopy.h>
#include <linux/instrumented.h>
#include <linux/minmax.h>
#include <linux/sched.h>
#include <linux/thread_info.h>

#include <asm/uaccess.h>

/*
 * Architectures that support memory tagging (assigning tags to memory regions,
 * embedding these tags into addresses that point to these memory regions, and
 * checking that the memory and the pointer tags match on memory accesses)
 * redefine this macro to strip tags from pointers.
 *
 * Passing down mm_struct allows defining untagging rules on a per-process
 * basis.
 *
 * It's defined as a noop for architectures that don't support memory tagging.
 */
#ifndef untagged_addr
#define untagged_addr(addr) (addr)
#endif

#ifndef untagged_addr_remote
#define untagged_addr_remote(mm, addr)	({		\
	mmap_assert_locked(mm);				\
	untagged_addr(addr);				\
})
#endif

/*
 * Architectures should provide two primitives (raw_copy_{to,from}_user())
 * and get rid of their private instances of copy_{to,from}_user() and
 * __copy_{to,from}_user{,_inatomic}().
 *
 * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
 * return the amount left to copy.  They should assume that access_ok() has
 * already been checked (and succeeded); they should *not* zero-pad anything.
 * No KASAN or object size checks either - those belong here.
 *
 * Both of these functions should attempt to copy size bytes starting at from
 * into the area starting at to.  They must not fetch or store anything
 * outside of those areas.  Return value must be between 0 (everything
 * copied successfully) and size (nothing copied).
 *
 * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
 * at to must become equal to the bytes fetched from the corresponding area
 * starting at from.  All data past to + size - N must be left unmodified.
 *
 * If copying succeeds, the return value must be 0.  If some data cannot be
 * fetched, it is permitted to copy less than had been fetched; the only
 * hard requirement is that not storing anything at all (i.e. returning size)
 * should happen only when nothing could be copied.  In other words, you don't
 * have to squeeze as much as possible - it is allowed, but not necessary.
 *
 * For raw_copy_from_user(), to always points to kernel memory and no faults
 * on store should happen.  Interpretation of from is affected by set_fs().
 * For raw_copy_to_user() it's the other way round.
 *
 * Both can be inlined - it's up to the architecture whether it wants to bother
 * with that.  They should not be used directly; they are used to implement
 * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user(),
 * __copy_{to,from}_user_inatomic()) that are used instead.  Out of those,
 * the __... ones are inlined.  Plain copy_{to,from}_user() might or might not
 * be inlined.  If you want them inlined, have asm/uaccess.h define
 * INLINE_COPY_{TO,FROM}_USER.
 *
 * NOTE: only copy_from_user() zero-pads the destination in case of a short
 * copy.  Neither __copy_from_user() nor __copy_from_user_inatomic() zero
 * anything at all; their callers absolutely must check the return value.
 *
 * Biarch ones should also provide raw_copy_in_user() - similar to the above,
 * but both source and destination are __user pointers (affected by set_fs()
 * as usual) and both source and destination can trigger faults.
 */

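/*
 * Illustrative sketch (not part of this header, hypothetical name): a
 * deliberately naive, byte-at-a-time fallback that satisfies the
 * raw_copy_from_user() contract described above - it assumes access_ok()
 * has already succeeded, never zero-pads, does no KASAN/object-size
 * checking, and returns the number of bytes left uncopied.  Real
 * architectures provide much faster implementations in asm/uaccess.h.
 *
 *	static inline unsigned long
 *	example_raw_copy_from_user(void *to, const void __user *from,
 *				   unsigned long n)
 *	{
 *		char *dst = to;
 *		const char __user *src = from;
 *
 *		while (n) {
 *			char c;
 *
 *			if (__get_user(c, src))
 *				break;
 *			*dst++ = c;
 *			src++;
 *			n--;
 *		}
 *		return n;
 *	}
 */
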
static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	unsigned long res;

	instrument_copy_from_user_before(to, from, n);
	check_object_size(to, n, false);
	res = raw_copy_from_user(to, from, n);
	instrument_copy_from_user_after(to, from, n, res);
	return res;
}

static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res;

	might_fault();
	instrument_copy_from_user_before(to, from, n);
	if (should_fail_usercopy())
		return n;
	check_object_size(to, n, false);
	res = raw_copy_from_user(to, from, n);
	instrument_copy_from_user_after(to, from, n, res);
	return res;
}

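/*
 * Example (illustrative sketch, hypothetical ubuf/kbuf/len names): since
 * __copy_from_user() never zero-pads the destination, a caller that has
 * already validated the range with access_ok() must still check the return
 * value and treat the destination as only partially written on failure:
 *
 *	if (!access_ok(ubuf, len))
 *		return -EFAULT;
 *	if (__copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 *
 * Most callers should simply use copy_from_user(), which zero-pads.
 */
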
/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space address is pinned
 * so that we don't take a page fault and sleep.
 */
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	if (should_fail_usercopy())
		return n;
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

#ifdef INLINE_COPY_FROM_USER
static inline __must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
	might_fault();
	if (!should_fail_usercopy() && likely(access_ok(from, n))) {
		instrument_copy_from_user_before(to, from, n);
		res = raw_copy_from_user(to, from, n);
		instrument_copy_from_user_after(to, from, n, res);
	}
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}
#else
extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif

#ifdef INLINE_COPY_TO_USER
static inline __must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}
#else
extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif

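/*
 * Note (illustrative): an architecture that wants the plain
 * copy_{to,from}_user() wrappers below inlined opts in from its
 * asm/uaccess.h, e.g.:
 *
 *	#define INLINE_COPY_FROM_USER
 *	#define INLINE_COPY_TO_USER
 *
 * Otherwise the out-of-line _copy_{to,from}_user() definitions are used.
 */
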
static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (check_copy_size(to, n, false))
		n = _copy_from_user(to, from, n);
	return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (check_copy_size(from, n, true))
		n = _copy_to_user(to, from, n);
	return n;
}

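/*
 * Example (illustrative sketch, hypothetical my_read/kbuf names): typical
 * use of copy_to_user() in a character device read handler.  A non-zero
 * return value means that many bytes could not be copied, which is usually
 * turned into -EFAULT:
 *
 *	static ssize_t my_read(struct file *file, char __user *buf,
 *			       size_t count, loff_t *ppos)
 *	{
 *		char kbuf[64] = "hello\n";
 *		size_t len = min(count, strlen(kbuf));
 *
 *		if (copy_to_user(buf, kbuf, len))
 *			return -EFAULT;
 *		return len;
 *	}
 */
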
#ifndef copy_mc_to_kernel
/*
 * Without arch opt-in this generic copy_mc_to_kernel() will not handle
 * #MC (or arch equivalent) during source read.
 */
static inline unsigned long __must_check
copy_mc_to_kernel(void *dst, const void *src, size_t cnt)
{
	memcpy(dst, src, cnt);
	return 0;
}
#endif

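/*
 * Example (illustrative, hypothetical dst/src/len/rem names): callers that
 * may read poisoned memory (e.g. pmem) treat a non-zero return from
 * copy_mc_to_kernel() as a short copy and bail out:
 *
 *	rem = copy_mc_to_kernel(dst, src, len);
 *	if (rem)
 *		return -EIO;
 */
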
static __always_inline void pagefault_disabled_inc(void)
{
	current->pagefault_disabled++;
}

static __always_inline void pagefault_disabled_dec(void)
{
	current->pagefault_disabled--;
}

/*
 * These routines enable/disable the pagefault handler. If disabled, it will
 * not take any locks and go straight to the fixup table.
 *
 * User access methods will not sleep when called from a pagefault_disabled()
 * environment.
 */
static inline void pagefault_disable(void)
{
	pagefault_disabled_inc();
	/*
	 * make sure to have issued the store before a pagefault
	 * can hit.
	 */
	barrier();
}

static inline void pagefault_enable(void)
{
	/*
	 * make sure to issue those last loads/stores before enabling
	 * the pagefault handler again.
	 */
	barrier();
	pagefault_disabled_dec();
}

/*
 * Is the pagefault handler disabled? If so, user access methods will not sleep.
 */
static inline bool pagefault_disabled(void)
{
	return current->pagefault_disabled != 0;
}

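/*
 * Example (illustrative sketch, hypothetical names): the usual pattern for
 * touching user memory from a context that must not sleep is to disable
 * page faults around an *_inatomic accessor and fall back to a sleeping
 * path if the fast attempt fails:
 *
 *	pagefault_disable();
 *	ret = __copy_from_user_inatomic(kbuf, ubuf, len);
 *	pagefault_enable();
 *	if (ret)
 *		ret = slow_path_copy(kbuf, ubuf, len);
 */
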
/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in irq context (via in_atomic()).
 *
 * This function should only be used by the fault handlers. Other users should
 * stick to pagefault_disabled().
 * Please NEVER use preempt_disable() to disable the fault handler.  With
 * !CONFIG_PREEMPT_COUNT it is effectively a NOP, so the handler won't be
 * disabled, and in_atomic() will report different values based on
 * !CONFIG_PREEMPT_COUNT.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())

#ifndef CONFIG_ARCH_HAS_SUBPAGE_FAULTS

/**
 * probe_subpage_writeable: probe the user range for write faults at sub-page
 *			    granularity (e.g. arm64 MTE)
 * @uaddr: start of address range
 * @size: size of address range
 *
 * Returns 0 on success, the number of bytes not probed on fault.
 *
 * It is expected that the caller checked for the write permission of each
 * page in the range either by put_user() or GUP. The architecture port can
 * implement a more efficient get_user() probing if the same sub-page faults
 * are triggered by either a read or a write.
 */
static inline size_t probe_subpage_writeable(char __user *uaddr, size_t size)
{
	return 0;
}

#endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */

#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline __must_check unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_inatomic(to, from, n);
}

#endif	/* ARCH_HAS_NOCACHE_UACCESS */

extern __must_check int check_zeroed_user(const void __user *from, size_t size);

/**
 * copy_struct_from_user: copy a struct from userspace
 * @dst: Destination address, in kernel space. This buffer must be @ksize
 *	 bytes long.
 * @ksize: Size of @dst struct.
 * @src: Source address, in userspace.
 * @usize: (Alleged) size of @src struct.
 *
 * Copies a struct from userspace to kernel space, in a way that guarantees
 * backwards-compatibility for struct syscall arguments (as long as future
 * struct extensions are made such that all new fields are *appended* to the
 * old struct, and zeroed-out new fields have the same meaning as the old
 * struct).
 *
 * @ksize is just sizeof(*dst), and @usize should've been passed by userspace.
 * The recommended usage is something like the following:
 *
 *   SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize)
 *   {
 *      int err;
 *      struct foo karg = {};
 *
 *      if (usize > PAGE_SIZE)
 *        return -E2BIG;
 *      if (usize < FOO_SIZE_VER0)
 *        return -EINVAL;
 *
 *      err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
 *      if (err)
 *        return err;
 *
 *      // ...
 *   }
 *
 * There are three cases to consider:
 *  * If @usize == @ksize, then it's copied verbatim.
 *  * If @usize < @ksize, then the userspace has passed an old struct to a
 *    newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize)
 *    are to be zero-filled.
 *  * If @usize > @ksize, then the userspace has passed a new struct to an
 *    older kernel. The trailing bytes unknown to the kernel (@usize - @ksize)
 *    are checked to ensure they are zeroed, otherwise -E2BIG is returned.
 *
 * Returns (in all cases, some data may have been copied):
 *  * -E2BIG:  (@usize > @ksize) and there are non-zero trailing bytes in @src.
 *  * -EFAULT: access to userspace failed.
 */
static __always_inline __must_check int
copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
		      size_t usize)
{
	size_t size = min(ksize, usize);
	size_t rest = max(ksize, usize) - size;

	/* Double check if ksize is larger than a known object size. */
	if (WARN_ON_ONCE(ksize > __builtin_object_size(dst, 1)))
		return -E2BIG;

	/* Deal with trailing bytes. */
	if (usize < ksize) {
		memset(dst + size, 0, rest);
	} else if (usize > ksize) {
		int ret = check_zeroed_user(src + size, rest);
		if (ret <= 0)
			return ret ?: -E2BIG;
	}
	/* Copy the interoperable parts of the struct. */
	if (copy_from_user(dst, src, size))
		return -EFAULT;
	return 0;
}

bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);

long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size);

long copy_from_user_nofault(void *dst, const void __user *src, size_t size);
long notrace copy_to_user_nofault(void __user *dst, const void *src,
		size_t size);

long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
		long count);

long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
		long count);
long strnlen_user_nofault(const void __user *unsafe_addr, long count);

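/*
 * Example (illustrative, hypothetical maybe_bad_ptr name): the *_nofault()
 * helpers are meant for debugging/tracing code that must not oops or sleep,
 * e.g. dumping through a possibly-invalid kernel pointer:
 *
 *	struct foo tmp;
 *
 *	if (copy_from_kernel_nofault(&tmp, maybe_bad_ptr, sizeof(tmp)))
 *		pr_warn("unreadable pointer %px\n", maybe_bad_ptr);
 */
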
#ifndef __get_kernel_nofault
#define __get_kernel_nofault(dst, src, type, label)	\
do {							\
	type __user *p = (type __force __user *)(src);	\
	type data;					\
	if (__get_user(data, p))			\
		goto label;				\
	*(type *)dst = data;				\
} while (0)

#define __put_kernel_nofault(dst, src, type, label)	\
do {							\
	type __user *p = (type __force __user *)(dst);	\
	type data = *(type *)src;			\
	if (__put_user(data, p))			\
		goto label;				\
} while (0)
#endif

/**
 * get_kernel_nofault(): safely attempt to read from a location
 * @val: read into this variable
 * @ptr: address to read from
 *
 * Returns 0 on success, or -EFAULT.
 */
#define get_kernel_nofault(val, ptr) ({				\
	const typeof(val) *__gk_ptr = (ptr);			\
	copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
})

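/*
 * Example (illustrative, hypothetical addr name): reading a single word
 * through a pointer that may be bogus, without risking an oops:
 *
 *	unsigned long word;
 *
 *	if (get_kernel_nofault(word, (unsigned long *)addr))
 *		return -EFAULT;
 */
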
#ifndef user_access_begin
#define user_access_begin(ptr,len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
#define unsafe_copy_from_user(d,s,l,e) unsafe_op_wrap(__copy_from_user(d,s,l),e)
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
#endif
#ifndef user_write_access_begin
#define user_write_access_begin user_access_begin
#define user_write_access_end user_access_end
#endif
#ifndef user_read_access_begin
#define user_read_access_begin user_access_begin
#define user_read_access_end user_access_end
#endif

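/*
 * Example (illustrative sketch, hypothetical uptr/val/efault names): the
 * unsafe_*() accessors must be bracketed by user_access_begin() and
 * user_access_end(), with a local label for the fault path:
 *
 *	if (!user_access_begin(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	unsafe_put_user(val, uptr, efault);
 *	user_access_end();
 *	return 0;
 *  efault:
 *	user_access_end();
 *	return -EFAULT;
 */
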
#ifdef CONFIG_HARDENED_USERCOPY
void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len);
#endif

#endif	/* __LINUX_UACCESS_H__ */