/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/fault-inject-usercopy.h>
#include <linux/instrumented.h>
#include <linux/minmax.h>
#include <linux/sched.h>
#include <linux/thread_info.h>

#include <asm/uaccess.h>

/*
 * Architectures should provide two primitives (raw_copy_{to,from}_user())
 * and get rid of their private instances of copy_{to,from}_user() and
 * __copy_{to,from}_user{,_inatomic}().
 *
 * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
 * return the amount left to copy. They should assume that access_ok() has
 * already been checked (and succeeded); they should *not* zero-pad anything.
 * No KASAN or object size checks either - those belong here.
 *
 * Both of these functions should attempt to copy size bytes starting at from
 * into the area starting at to. They must not fetch or store anything
 * outside of those areas. Return value must be between 0 (everything
 * copied successfully) and size (nothing copied).
 *
 * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
 * at to must become equal to the bytes fetched from the corresponding area
 * starting at from. All data past to + size - N must be left unmodified.
 *
 * If copying succeeds, the return value must be 0. If some data cannot be
 * fetched, it is permitted to copy less than had been fetched; the only
 * hard requirement is that not storing anything at all (i.e. returning size)
 * should happen only when nothing could be copied. In other words, you don't
 * have to squeeze as much as possible - it is allowed, but not necessary.
 *
 * For raw_copy_from_user(), 'to' always points to kernel memory and no
 * faults on store should happen. Interpretation of 'from' is affected by
 * set_fs(). For raw_copy_to_user() it's the other way round.
 *
 * Both can be inlined - it's up to architectures whether they want to bother
 * with that. They should not be used directly; they are used to implement
 * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user{,_inatomic}())
 * that are used instead. Out of those, __... ones are inlined. Plain
 * copy_{to,from}_user() might or might not be inlined. If you want them
 * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
 *
 * NOTE: only copy_from_user() zero-pads the destination in case of short copy.
 * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
 * at all; their callers absolutely must check the return value.
 *
 * Biarch ones should also provide raw_copy_in_user() - similar to the above,
 * but both source and destination are __user pointers (affected by set_fs()
 * as usual) and both source and destination can trigger faults.
 */
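
/*
 * Illustrative sketch only (not part of this header's API): a caller
 * honouring the "bytes not copied" return convention described above;
 * buf, ubuf and len are hypothetical:
 *
 *	unsigned long left = copy_from_user(buf, ubuf, len);
 *
 *	if (left)
 *		return -EFAULT;	(len - left bytes did arrive in buf)
 */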

static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	instrument_copy_from_user(to, from, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	instrument_copy_from_user(to, from, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}

/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to: Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n: Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space. The caller must check
 * the specified block with access_ok() before calling this function.
 * The caller should also make sure the user space address is pinned,
 * so that we don't end up taking a page fault and sleeping.
 */
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	if (should_fail_usercopy())
		return n;
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}

#ifdef INLINE_COPY_FROM_USER
static inline __must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;
	might_fault();
	if (!should_fail_usercopy() && likely(access_ok(from, n))) {
		instrument_copy_from_user(to, from, n);
		res = raw_copy_from_user(to, from, n);
	}
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}
#else
extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif

#ifdef INLINE_COPY_TO_USER
static inline __must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}
#else
extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif

static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (likely(check_copy_size(to, n, false)))
		n = _copy_from_user(to, from, n);
	return n;
}

static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (likely(check_copy_size(from, n, true)))
		n = _copy_to_user(to, from, n);
	return n;
}
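
/*
 * Minimal usage sketch (karg and uarg are hypothetical): both functions
 * return the number of bytes *not* copied, so callers conventionally map
 * any non-zero result to -EFAULT:
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 */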

#ifndef copy_mc_to_kernel
/*
 * Without arch opt-in this generic copy_mc_to_kernel() will not handle
 * #MC (or arch equivalent) during source read.
 */
static inline unsigned long __must_check
copy_mc_to_kernel(void *dst, const void *src, size_t cnt)
{
	memcpy(dst, src, cnt);
	return 0;
}
#endif
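
/*
 * Usage sketch (dst, src and len assumed): as with copy_from_user(), a
 * non-zero return is the number of bytes not copied, e.g. when an arch
 * implementation aborts the copy on a machine check:
 *
 *	unsigned long rem = copy_mc_to_kernel(dst, src, len);
 *
 *	if (rem)
 *		return -EIO;
 */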

static __always_inline void pagefault_disabled_inc(void)
{
	current->pagefault_disabled++;
}

static __always_inline void pagefault_disabled_dec(void)
{
	current->pagefault_disabled--;
}

/*
 * These routines enable/disable the pagefault handler. If disabled, it will
 * not take any locks and go straight to the fixup table.
 *
 * User access methods will not sleep when called from a pagefault_disabled()
 * environment.
 */
static inline void pagefault_disable(void)
{
	pagefault_disabled_inc();
	/*
	 * make sure to have issued the store before a pagefault
	 * can hit.
	 */
	barrier();
}

static inline void pagefault_enable(void)
{
	/*
	 * make sure to issue those last loads/stores before enabling
	 * the pagefault handler again.
	 */
	barrier();
	pagefault_disabled_dec();
}
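
/*
 * Typical pattern (a sketch, with dst/usrc/size assumed): perform a user
 * access from a context that must not sleep. With page faults disabled, a
 * faulting access fails instead of sleeping in the fault handler:
 *
 *	pagefault_disable();
 *	ret = __copy_from_user_inatomic(dst, usrc, size);
 *	pagefault_enable();
 *	if (ret)
 *		(fall back to a context that may sleep, and retry)
 */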

/*
 * Is the pagefault handler disabled? If so, user access methods will not sleep.
 */
static inline bool pagefault_disabled(void)
{
	return current->pagefault_disabled != 0;
}

/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in irq context (via in_atomic()).
 *
 * This function should only be used by the fault handlers. Other users should
 * stick to pagefault_disabled().
 * Please NEVER use preempt_disable() to disable the fault handler. With
 * !CONFIG_PREEMPT_COUNT, this is like a NOP. So the handler won't be disabled.
 * in_atomic() will report different values based on !CONFIG_PREEMPT_COUNT.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())
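
/*
 * Sketch of the intended use, from an arch fault handler (details vary by
 * architecture; 'mm' is the faulting task's mm, assumed here):
 *
 *	if (faulthandler_disabled() || !mm)
 *		(resolve via the exception fixup table, never sleep)
 */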

#ifndef CONFIG_ARCH_HAS_SUBPAGE_FAULTS

/**
 * probe_subpage_writeable: probe the user range for write faults at sub-page
 *			    granularity (e.g. arm64 MTE)
 * @uaddr: start of address range
 * @size: size of address range
 *
 * Returns 0 on success, the number of bytes not probed on fault.
 *
 * It is expected that the caller checked for the write permission of each
 * page in the range either by put_user() or GUP. The architecture port can
 * implement a more efficient get_user() probing if the same sub-page faults
 * are triggered by either a read or a write.
 */
static inline size_t probe_subpage_writeable(char __user *uaddr, size_t size)
{
	return 0;
}

#endif /* CONFIG_ARCH_HAS_SUBPAGE_FAULTS */
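
/*
 * Usage sketch (uaddr and size assumed), after the pages themselves have
 * already been verified writable, e.g. via GUP:
 *
 *	if (probe_subpage_writeable(uaddr, size))
 *		return -EFAULT;
 */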

#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline __must_check unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_inatomic(to, from, n);
}

#endif	/* ARCH_HAS_NOCACHE_UACCESS */

extern __must_check int check_zeroed_user(const void __user *from, size_t size);

/**
 * copy_struct_from_user: copy a struct from userspace
 * @dst: Destination address, in kernel space. This buffer must be @ksize
 *	bytes long.
 * @ksize: Size of @dst struct.
 * @src: Source address, in userspace.
 * @usize: (Alleged) size of @src struct.
 *
 * Copies a struct from userspace to kernel space, in a way that guarantees
 * backwards-compatibility for struct syscall arguments (as long as future
 * struct extensions are made such that all new fields are *appended* to the
 * old struct, and zeroed-out new fields have the same meaning as the old
 * struct).
 *
 * @ksize is just sizeof(*dst), and @usize should've been passed by userspace.
 * The recommended usage is something like the following:
 *
 *   SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize)
 *   {
 *      int err;
 *      struct foo karg = {};
 *
 *      if (usize > PAGE_SIZE)
 *        return -E2BIG;
 *      if (usize < FOO_SIZE_VER0)
 *        return -EINVAL;
 *
 *      err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
 *      if (err)
 *        return err;
 *
 *      // ...
 *   }
 *
 * There are three cases to consider:
 *  * If @usize == @ksize, then it's copied verbatim.
 *  * If @usize < @ksize, then the userspace has passed an old struct to a
 *    newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize)
 *    are to be zero-filled.
 *  * If @usize > @ksize, then the userspace has passed a new struct to an
 *    older kernel. The trailing bytes unknown to the kernel (@usize - @ksize)
 *    are checked to ensure they are zeroed, otherwise -E2BIG is returned.
 *
 * Returns (in all cases, some data may have been copied):
 *  * -E2BIG:  (@usize > @ksize) and there are non-zero trailing bytes in @src.
 *  * -EFAULT: access to userspace failed.
 */
static __always_inline __must_check int
copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
		      size_t usize)
{
	size_t size = min(ksize, usize);
	size_t rest = max(ksize, usize) - size;

	/* Deal with trailing bytes. */
	if (usize < ksize) {
		memset(dst + size, 0, rest);
	} else if (usize > ksize) {
		int ret = check_zeroed_user(src + size, rest);
		if (ret <= 0)
			return ret ?: -E2BIG;
	}
	/* Copy the interoperable parts of the struct. */
	if (copy_from_user(dst, src, size))
		return -EFAULT;
	return 0;
}

bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);

long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size);

long copy_from_user_nofault(void *dst, const void __user *src, size_t size);
long notrace copy_to_user_nofault(void __user *dst, const void *src,
		size_t size);

long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
		long count);

long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
		long count);
long strnlen_user_nofault(const void __user *unsafe_addr, long count);
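
/*
 * Example sketch for the _nofault() helpers, which never sleep and fail
 * with a negative error instead of faulting; 'ptr' is hypothetical and
 * may point at unmapped kernel memory:
 *
 *	unsigned long val;
 *
 *	if (copy_from_kernel_nofault(&val, ptr, sizeof(val)))
 *		return -EFAULT;
 */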

#ifndef __get_kernel_nofault
#define __get_kernel_nofault(dst, src, type, label)	\
do {							\
	type __user *p = (type __force __user *)(src);	\
	type data;					\
	if (__get_user(data, p))			\
		goto label;				\
	*(type *)dst = data;				\
} while (0)

#define __put_kernel_nofault(dst, src, type, label)	\
do {							\
	type __user *p = (type __force __user *)(dst);	\
	type data = *(type *)src;			\
	if (__put_user(data, p))			\
		goto label;				\
} while (0)
#endif

/**
 * get_kernel_nofault(): safely attempt to read from a location
 * @val: read into this variable
 * @ptr: address to read from
 *
 * Returns 0 on success, or -EFAULT.
 */
#define get_kernel_nofault(val, ptr) ({				\
	const typeof(val) *__gk_ptr = (ptr);			\
	copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
})
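
/*
 * Usage sketch (the 'desc' pointer is hypothetical): the macro evaluates
 * to 0 on success, so it composes directly with error returns:
 *
 *	int level;
 *
 *	if (get_kernel_nofault(level, &desc->level))
 *		return -EFAULT;
 */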

#ifndef user_access_begin
#define user_access_begin(ptr,len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
#define unsafe_copy_from_user(d,s,l,e) unsafe_op_wrap(__copy_from_user(d,s,l),e)
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
#endif
#ifndef user_write_access_begin
#define user_write_access_begin user_access_begin
#define user_write_access_end user_access_end
#endif
#ifndef user_read_access_begin
#define user_read_access_begin user_access_begin
#define user_read_access_end user_access_end
#endif
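
/*
 * Minimal sketch of the begin/end bracketing (uptr, val and the Efault
 * label are hypothetical). Every path out of the bracketed region,
 * including the fault label, must call the matching _end():
 *
 *	if (!user_write_access_begin(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	unsafe_put_user(val, uptr, Efault);
 *	user_write_access_end();
 *	return 0;
 * Efault:
 *	user_write_access_end();
 *	return -EFAULT;
 */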

#ifdef CONFIG_HARDENED_USERCOPY
void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len);
#endif

#endif	/* __LINUX_UACCESS_H__ */