/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/instrumented.h>
#include <linux/minmax.h>
#include <linux/sched.h>
#include <linux/thread_info.h>

#include <asm/uaccess.h>
/*
 * Force the uaccess routines to be wired up for actual userspace access,
 * overriding any possible set_fs(KERNEL_DS) still lingering around.  Undone
 * using force_uaccess_end below.
 */
static inline mm_segment_t force_uaccess_begin(void)
{
	mm_segment_t fs = get_fs();

	set_fs(USER_DS);
	return fs;
}

static inline void force_uaccess_end(mm_segment_t oldfs)
{
	set_fs(oldfs);
}
/*
 * Architectures should provide two primitives (raw_copy_{to,from}_user())
 * and get rid of their private instances of copy_{to,from}_user() and
 * __copy_{to,from}_user{,_inatomic}().
 *
 * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
 * return the amount left to copy.  They should assume that access_ok() has
 * already been checked (and succeeded); they should *not* zero-pad anything.
 * No KASAN or object size checks either - those belong here.
 *
 * Both of these functions should attempt to copy size bytes starting at from
 * into the area starting at to.  They must not fetch or store anything
 * outside of those areas.  Return value must be between 0 (everything
 * copied successfully) and size (nothing copied).
 *
 * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
 * at to must become equal to the bytes fetched from the corresponding area
 * starting at from.  All data past to + size - N must be left unmodified.
 *
 * If copying succeeds, the return value must be 0.  If some data cannot be
 * fetched, it is permitted to copy less than had been fetched; the only
 * hard requirement is that not storing anything at all (i.e. returning size)
 * should happen only when nothing could be copied.  In other words, you don't
 * have to squeeze as much as possible - it is allowed, but not necessary.
 *
 * For raw_copy_from_user(), to always points to kernel memory and no faults
 * on store should happen.  Interpretation of from is affected by set_fs().
 * For raw_copy_to_user() it's the other way round.
 *
 * Both can be inlined - it's up to architectures whether they want to bother
 * with that.  They should not be used directly; they are used to implement
 * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user{,_inatomic}())
 * that are used instead.  Out of those, __... ones are inlined.  Plain
 * copy_{to,from}_user() might or might not be inlined.  If you want them
 * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
 *
 * NOTE: only copy_from_user() zero-pads the destination in case of short copy.
 * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
 * at all; their callers absolutely must check the return value.
 *
 * Biarch ones should also provide raw_copy_in_user() - similar to the above,
 * but both source and destination are __user pointers (affected by set_fs()
 * as usual) and both source and destination can trigger faults.
 */
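
/*
 * Illustrative sketch (not part of the kernel API): a naive byte-at-a-time
 * raw_copy_from_user() satisfying the contract above, assuming a
 * hypothetical arch_get_user_byte() primitive that returns non-zero on
 * fault.  It returns the number of bytes left uncopied, never zero-pads,
 * and touches nothing outside [to, to + size) and [from, from + size).
 * Real ports use optimized, exception-table based copy loops instead.
 */
static inline unsigned long
example_raw_copy_from_user(void *to, const void __user *from,
			   unsigned long size)
{
	char *dst = to;
	const char __user *src = from;
	unsigned long left = size;

	while (left) {
		char c;

		if (arch_get_user_byte(&c, src))	/* hypothetical helper */
			break;
		*dst++ = c;
		src++;
		left--;
	}
	return left;	/* 0 == all copied, size == nothing copied */
}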
static __always_inline __must_check unsigned long
__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
{
	instrument_copy_from_user(to, from, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}
static __always_inline __must_check unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	instrument_copy_from_user(to, from, n);
	check_object_size(to, n, false);
	return raw_copy_from_user(to, from, n);
}
/**
 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.
 *
 * Copy data from kernel space to user space.  The caller must check the
 * specified block with access_ok() before calling this function, and should
 * also make sure the user space pages are pinned so that the copy does not
 * take a page fault and sleep.
 */
static __always_inline __must_check unsigned long
__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
{
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}
static __always_inline __must_check unsigned long
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	instrument_copy_to_user(to, from, n);
	check_object_size(from, n, true);
	return raw_copy_to_user(to, from, n);
}
#ifdef INLINE_COPY_FROM_USER
static inline __must_check unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;

	might_fault();
	if (likely(access_ok(from, n))) {
		instrument_copy_from_user(to, from, n);
		res = raw_copy_from_user(to, from, n);
	}
	if (unlikely(res))
		memset(to + (n - res), 0, res);
	return res;
}
#else
extern __must_check unsigned long
_copy_from_user(void *, const void __user *, unsigned long);
#endif
#ifdef INLINE_COPY_TO_USER
static inline __must_check unsigned long
_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}
#else
extern __must_check unsigned long
_copy_to_user(void __user *, const void *, unsigned long);
#endif
static __always_inline unsigned long __must_check
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (likely(check_copy_size(to, n, false)))
		n = _copy_from_user(to, from, n);
	return n;
}
static __always_inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (likely(check_copy_size(from, n, true)))
		n = _copy_to_user(to, from, n);
	return n;
}
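
/*
 * Illustrative usage (hypothetical names): copy_{to,from}_user() return the
 * number of bytes that could NOT be copied, so any non-zero result is
 * normally reported to userspace as -EFAULT.
 */
struct example_args {
	u32 flags;
	u64 addr;
};

static inline long example_handler(struct example_args __user *uarg)
{
	struct example_args karg;

	if (copy_from_user(&karg, uarg, sizeof(karg)))
		return -EFAULT;

	karg.flags |= 0x1;	/* ... act on the arguments ... */

	if (copy_to_user(uarg, &karg, sizeof(karg)))
		return -EFAULT;
	return 0;
}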
#ifdef CONFIG_COMPAT
static __always_inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_fault();
	if (access_ok(to, n) && access_ok(from, n))
		n = raw_copy_in_user(to, from, n);
	return n;
}
#endif
#ifndef copy_mc_to_kernel
/*
 * Without arch opt-in this generic copy_mc_to_kernel() will not handle
 * #MC (or arch equivalent) during source read.
 */
static inline unsigned long __must_check
copy_mc_to_kernel(void *dst, const void *src, size_t cnt)
{
	memcpy(dst, src, cnt);
	return 0;
}
#endif
static __always_inline void pagefault_disabled_inc(void)
{
	current->pagefault_disabled++;
}

static __always_inline void pagefault_disabled_dec(void)
{
	current->pagefault_disabled--;
}
/*
 * These routines enable/disable the pagefault handler. If disabled, it will
 * not take any locks and go straight to the fixup table.
 *
 * User access methods will not sleep when called from a pagefault_disabled()
 * environment.
 */
static inline void pagefault_disable(void)
{
	pagefault_disabled_inc();
	/*
	 * make sure to have issued the store before a pagefault
	 * can hit.
	 */
	barrier();
}

static inline void pagefault_enable(void)
{
	/*
	 * make sure to issue those last loads/stores before enabling
	 * the pagefault handler again.
	 */
	barrier();
	pagefault_disabled_dec();
}
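
/*
 * Illustrative usage (hypothetical name): with page faults disabled, an
 * atomic user copy fails fast instead of sleeping, so the caller needs a
 * faultable fallback path.  access_ok() must already have been checked,
 * per the __copy_from_user_inatomic() rules above.
 */
static inline unsigned long
example_copy_from_user_atomic(void *dst, const void __user *src,
			      unsigned long n)
{
	unsigned long left;

	pagefault_disable();
	left = __copy_from_user_inatomic(dst, src, n);
	pagefault_enable();

	return left;	/* non-zero: retry from a faultable context */
}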
/*
 * Is the pagefault handler disabled? If so, user access methods will not sleep.
 */
static inline bool pagefault_disabled(void)
{
	return current->pagefault_disabled != 0;
}
/*
 * The pagefault handler is in general disabled by pagefault_disable() or
 * when in irq context (via in_atomic()).
 *
 * This function should only be used by the fault handlers. Other users should
 * stick to pagefault_disabled().
 * Please NEVER use preempt_disable() to disable the fault handler. With
 * !CONFIG_PREEMPT_COUNT, preempt_disable() is a no-op, so the handler won't
 * actually be disabled, and in_atomic() will report different values
 * depending on CONFIG_PREEMPT_COUNT.
 */
#define faulthandler_disabled() (pagefault_disabled() || in_atomic())
#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline __must_check unsigned long
__copy_from_user_inatomic_nocache(void *to, const void __user *from,
				  unsigned long n)
{
	return __copy_from_user_inatomic(to, from, n);
}

#endif		/* ARCH_HAS_NOCACHE_UACCESS */
extern __must_check int check_zeroed_user(const void __user *from, size_t size);
/**
 * copy_struct_from_user: copy a struct from userspace
 * @dst:   Destination address, in kernel space. This buffer must be @ksize
 *         bytes long.
 * @ksize: Size of @dst struct.
 * @src:   Source address, in userspace.
 * @usize: (Alleged) size of @src struct.
 *
 * Copies a struct from userspace to kernel space, in a way that guarantees
 * backwards-compatibility for struct syscall arguments (as long as future
 * struct extensions are made such that all new fields are *appended* to the
 * old struct, and zeroed-out new fields have the same meaning as the old
 * struct).
 *
 * @ksize is just sizeof(*dst), and @usize should've been passed by userspace.
 * The recommended usage is something like the following:
 *
 *   SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize)
 *   {
 *      int err;
 *      struct foo karg = {};
 *
 *      if (usize > PAGE_SIZE)
 *        return -E2BIG;
 *      if (usize < FOO_SIZE_VER0)
 *        return -EINVAL;
 *
 *      err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
 *      if (err)
 *        return err;
 *
 *      // ...
 *   }
 *
 * There are three cases to consider:
 *  * If @usize == @ksize, then it's copied verbatim.
 *  * If @usize < @ksize, then the userspace has passed an old struct to a
 *    newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize)
 *    are to be zero-filled.
 *  * If @usize > @ksize, then the userspace has passed a new struct to an
 *    older kernel. The trailing bytes unknown to the kernel (@usize - @ksize)
 *    are checked to ensure they are zeroed, otherwise -E2BIG is returned.
 *
 * Returns (in all cases, some data may have been copied):
 *  * -E2BIG:  (@usize > @ksize) and there are non-zero trailing bytes in @src.
 *  * -EFAULT: access to userspace failed.
 */
static __always_inline __must_check int
copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
		      size_t usize)
{
	size_t size = min(ksize, usize);
	size_t rest = max(ksize, usize) - size;

	/* Deal with trailing bytes. */
	if (usize < ksize) {
		memset(dst + size, 0, rest);
	} else if (usize > ksize) {
		int ret = check_zeroed_user(src + size, rest);
		if (ret <= 0)
			return ret ?: -E2BIG;
	}
	/* Copy the interoperable parts of the struct. */
	if (copy_from_user(dst, src, size))
		return -EFAULT;
	return 0;
}
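
/*
 * Worked example (hypothetical sizes): with a 16-byte v1 struct and a
 * 24-byte v2 struct:
 *  - old userspace (usize == 16) on a v2 kernel (ksize == 24): the first
 *    16 bytes are copied and @dst bytes 16..23 are zero-filled;
 *  - new userspace (usize == 24) on a v1 kernel (ksize == 16): @src bytes
 *    16..23 must be zero, otherwise -E2BIG is returned.
 */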
bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size);

long copy_from_kernel_nofault(void *dst, const void *src, size_t size);
long notrace copy_to_kernel_nofault(void *dst, const void *src, size_t size);

long copy_from_user_nofault(void *dst, const void __user *src, size_t size);
long notrace copy_to_user_nofault(void __user *dst, const void *src,
		size_t size);

long strncpy_from_kernel_nofault(char *dst, const void *unsafe_addr,
		long count);

long strncpy_from_user_nofault(char *dst, const void __user *unsafe_addr,
		long count);
long strnlen_user_nofault(const void __user *unsafe_addr, long count);
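
/*
 * Illustrative usage (hypothetical names): the _nofault() variants are meant
 * for contexts that must not sleep or take a page fault (e.g. tracing); they
 * return -EFAULT on any failure rather than a byte count.
 */
static inline int example_trace_read(const void __user *uptr, u64 *out)
{
	u64 val;

	if (copy_from_user_nofault(&val, uptr, sizeof(val)))
		return -EFAULT;
	*out = val;
	return 0;
}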
/**
 * get_kernel_nofault(): safely attempt to read from a location
 * @val: read into this variable
 * @ptr: address to read from
 *
 * Returns 0 on success, or -EFAULT.
 */
#define get_kernel_nofault(val, ptr) ({				\
	const typeof(val) *__gk_ptr = (ptr);			\
	copy_from_kernel_nofault(&(val), __gk_ptr, sizeof(val));\
})
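
/*
 * Illustrative usage (hypothetical names): probing a kernel address that may
 * be unmapped, e.g. from a debugging facility.
 */
static inline int example_peek_kernel(const unsigned long *maybe_bad_ptr,
				      unsigned long *out)
{
	unsigned long val;

	if (get_kernel_nofault(val, maybe_bad_ptr))
		return -EFAULT;	/* address was not readable */
	*out = val;
	return 0;
}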
#ifndef user_access_begin
#define user_access_begin(ptr,len) access_ok(ptr, len)
#define user_access_end() do { } while (0)
#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
static inline unsigned long user_access_save(void) { return 0UL; }
static inline void user_access_restore(unsigned long flags) { }
#endif
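
/*
 * Illustrative usage (hypothetical names): user_access_begin() performs the
 * access_ok() check (and on some architectures opens a user-access window,
 * e.g. x86 STAC), after which unsafe_put_user() can run without per-access
 * checks.  Every exit path must pass through user_access_end().
 */
static inline int example_put_pair(u32 __user *uptr, u32 a, u32 b)
{
	if (!user_access_begin(uptr, 2 * sizeof(u32)))
		return -EFAULT;
	unsafe_put_user(a, &uptr[0], efault);
	unsafe_put_user(b, &uptr[1], efault);
	user_access_end();
	return 0;
efault:
	user_access_end();
	return -EFAULT;
}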
#ifndef user_write_access_begin
#define user_write_access_begin user_access_begin
#define user_write_access_end user_access_end
#endif
#ifndef user_read_access_begin
#define user_read_access_begin user_access_begin
#define user_read_access_end user_access_end
#endif
#ifdef CONFIG_HARDENED_USERCOPY
void usercopy_warn(const char *name, const char *detail, bool to_user,
		   unsigned long offset, unsigned long len);
void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len);
#endif

#endif		/* __LINUX_UACCESS_H__ */