/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed;
 * with get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_fs()	(current->thread.addr_limit)
static inline void set_fs(mm_segment_t fs)
{
        current->thread.addr_limit = fs;
        /* On user-mode return, check fs is correct */
        set_thread_flag(TIF_FSCHECK);
}

#define segment_eq(a, b)	((a).seg == (b).seg)
#define user_addr_max()		(current->thread.addr_limit.seg)

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
        /*
         * If we have used "sizeof()" for the size,
         * we know it won't overflow the limit (but
         * it might overflow the 'addr', so it's
         * important to subtract the size from the
         * limit, not add it to the address).
         */
        if (__builtin_constant_p(size))
                return unlikely(addr > limit - size);

        /* Arbitrary sizes? Be careful about overflow */
        addr += size;
        if (unlikely(addr < size))
                return true;
        return unlikely(addr > limit);
}
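/*
 * Worked example of the overflow hazard above (editor's illustration,
 * not from the original source): on 64-bit, addr = 0xfffffffffffffff8
 * with size = 16 makes "addr + size" wrap around to 8, which would pass
 * a naive "addr + size <= limit" test.  Subtracting from the limit
 * ("addr > limit - size") for constant sizes, and testing "addr < size"
 * after the addition otherwise, catches the wrap.
 */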

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline bool pagefault_disabled(void);
# define WARN_ON_IN_IRQ()	\
	WARN_ON_ONCE(!in_task() && !pagefault_disabled())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 *
 * Return: true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 */
#define access_ok(addr, size)						\
({									\
	WARN_ON_IN_IRQ();						\
	likely(!__range_not_ok(addr, size, user_addr_max()));		\
})
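/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * access_ok() only validates the range against the current address limit;
 * the access itself can still fault, so the accessor's return value must
 * be checked as well.  "ubuf", "kbuf" and "len" are hypothetical locals:
 *
 *	if (!access_ok(ubuf, len))
 *		return -EFAULT;
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */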

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec()	\
({					\
	stac();				\
	barrier_nospec();		\
})

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
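/*
 * Editor's note, for illustration: __inttype(char) and __inttype(u32)
 * both select unsigned long, while __inttype(u64) selects unsigned long
 * long on 32-bit kernels.  The accessors below therefore always move a
 * full register (or register pair) regardless of the user type's width.
 */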

/**
 * get_user - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	asm volatile("call __get_user_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu),		\
			ASM_CALL_CONSTRAINT				\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})
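/*
 * Usage sketch (editor's illustration; example_read_flag() is a
 * hypothetical helper, not a kernel function).  get_user() performs its
 * own range check, so no access_ok() is needed, and @x is zeroed on
 * failure:
 *
 *	static int example_read_flag(u32 __user *uptr, u32 *kval)
 *	{
 *		if (get_user(*kval, uptr))
 *			return -EFAULT;
 *		return 0;
 *	}
 */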

#define __put_user_x(size, x, ptr, __ret_pu)			\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")


#ifdef CONFIG_X86_32
#define __put_user_goto_u64(x, addr, label)			\
	asm_volatile_goto("\n"					\
			  "1:	movl %%eax,0(%1)\n"		\
			  "2:	movl %%edx,4(%1)\n"		\
			  _ASM_EXTABLE_UA(1b, %l2)		\
			  _ASM_EXTABLE_UA(2b, %l2)		\
			  : : "A" (x), "r" (addr)		\
			  : : label)

#define __put_user_asm_ex_u64(x, addr)				\
	asm volatile("\n"					\
		     "1:	movl %%eax,0(%1)\n"		\
		     "2:	movl %%edx,4(%1)\n"		\
		     "3:"					\
		     _ASM_EXTABLE_EX(1b, 2b)			\
		     _ASM_EXTABLE_EX(2b, 3b)			\
		     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)				\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_goto_u64(x, ptr, label) \
	__put_user_goto(x, ptr, "q", "", "er", label)
#define __put_user_asm_ex_u64(x, addr)	\
	__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr)) __pu_val;				\
	__chk_user_ptr(ptr);					\
	might_fault();						\
	__pu_val = x;						\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		__put_user_x(1, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 8:							\
		__put_user_x8(__pu_val, ptr, __ret_pu);		\
		break;						\
	default:						\
		__put_user_x(X, __pu_val, ptr, __ret_pu);	\
		break;						\
	}							\
	__builtin_expect(__ret_pu, 0);				\
})
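/*
 * Usage sketch (editor's illustration; example_report_count() is
 * hypothetical): the value is evaluated once into __pu_val, and the
 * switch selects the right __put_user_x() call at compile time:
 *
 *	static int example_report_count(unsigned int count,
 *					unsigned int __user *uptr)
 *	{
 *		return put_user(count, uptr);
 *	}
 */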

#define __put_user_size(x, ptr, size, label)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_goto(x, ptr, "b", "b", "iq", label);		\
		break;							\
	case 2:								\
		__put_user_goto(x, ptr, "w", "w", "ir", label);		\
		break;							\
	case 4:								\
		__put_user_goto(x, ptr, "l", "k", "ir", label);		\
		break;							\
	case 8:								\
		__put_user_goto_u64(x, ptr, label);			\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __put_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm_ex(x, ptr, "b", "b", "iq");		\
		break;							\
	case 2:								\
		__put_user_asm_ex(x, ptr, "w", "w", "ir");		\
		break;							\
	case 4:								\
		__put_user_asm_ex(x, ptr, "l", "k", "ir");		\
		break;							\
	case 8:								\
		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)			\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1:	movl %2,%%eax\n"			\
		     "2:	movl %3,%%edx\n"			\
		     "3:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	mov %4,%0\n"				\
		     "	xorl %%eax,%%eax\n"				\
		     "	xorl %%edx,%%edx\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 4b)				\
		     _ASM_EXTABLE_UA(2b, 4b)				\
		     : "=r" (retval), "=&A"(x)				\
		     : "m" (__m(__ptr)), "m" __m(((u32 __user *)(__ptr)) + 1), \
		       "i" (errret), "0" (retval));			\
})

#define __get_user_asm_ex_u64(x, ptr)	(x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
	__get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
	__get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

#define __get_user_asm_nozero(x, addr, err, itype, rtype, ltype, errret) \
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __get_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm_ex(x, ptr, "b", "b", "=q");		\
		break;							\
	case 2:								\
		__get_user_asm_ex(x, ptr, "w", "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm_ex(x, ptr, "l", "k", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_ex_u64(x, ptr);				\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	xor"itype" %"rtype"0,%"rtype"0\n"	\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_EX(1b, 3b)				\
		     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)				\
({									\
	__label__ __pu_label;						\
	int __pu_err = -EFAULT;						\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__typeof__(ptr) __pu_ptr = (ptr);				\
	__typeof__(size) __pu_size = (size);				\
	__uaccess_begin();						\
	__put_user_size(__pu_val, __pu_ptr, __pu_size, __pu_label);	\
	__pu_err = 0;							\
__pu_label:								\
	__uaccess_end();						\
	__builtin_expect(__pu_err, 0);					\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__typeof__(ptr) __gu_ptr = (ptr);				\
	__typeof__(size) __gu_size = (size);				\
	__uaccess_begin_nospec();					\
	__get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err, -EFAULT); \
	__uaccess_end();						\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__builtin_expect(__gu_err, 0);					\
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_goto(x, addr, itype, rtype, ltype, label)	\
	asm_volatile_goto("\n"					\
			  "1:	mov"itype" %"rtype"0,%1\n"	\
			  _ASM_EXTABLE_UA(1b, %l2)		\
			  : : ltype(x), "m" (__m(addr))		\
			  : : label)

#define __put_user_failed(x, addr, itype, rtype, ltype, errret)		\
	({	__label__ __puflab;					\
		int __pufret = errret;					\
		__put_user_goto(x, addr, itype, rtype, ltype, __puflab); \
		__pufret = 0;						\
	__puflab: __pufret; })

#define __put_user_asm(x, addr, retval, itype, rtype, ltype, errret)	do { \
	retval = __put_user_failed(x, addr, itype, rtype, ltype, errret); \
} while (0)

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %"rtype"0,%1\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try	do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin();						\
	barrier();

#define uaccess_try_nospec do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin_nospec();					\

#define uaccess_catch(err)						\
	__uaccess_end();						\
	(err) |= (current->thread.uaccess_err ? -EFAULT : 0);		\
} while (0)

/**
 * __get_user - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 */

#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
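/*
 * Combined sketch (editor's illustration; example_increment() is
 * hypothetical): the double-underscore forms skip the range check, so a
 * single access_ok() covers both accesses to the same location:
 *
 *	static int example_increment(int __user *uptr)
 *	{
 *		int val;
 *
 *		if (!access_ok(uptr, sizeof(*uptr)))
 *			return -EFAULT;
 *		if (__get_user(val, uptr))
 *			return -EFAULT;
 *		return __put_user(val + 1, uptr);
 *	}
 */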

/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *	get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try		uaccess_try_nospec
#define get_user_catch(err)	uaccess_catch(err)

#define get_user_ex(x, ptr)	do {					\
	unsigned long __gue_val;					\
	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
} while (0)

#define put_user_try		uaccess_try
#define put_user_catch(err)	uaccess_catch(err)

#define put_user_ex(x, ptr)						\
	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
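/*
 * Pairing sketch (editor's illustration; "frame" is a hypothetical
 * user-space structure pointer).  A fault in any *_ex() access is caught
 * by the extable fixup, recorded in current->thread.uaccess_err, and
 * folded into err by put_user_catch():
 *
 *	int err = 0;
 *
 *	put_user_try {
 *		put_user_ex(sig, &frame->sig);
 *		put_user_ex(flags, &frame->flags);
 *	} put_user_catch(err);
 *
 *	if (err)
 *		return -EFAULT;
 */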

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");

#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)	\
({									\
	int __ret = 0;							\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	__uaccess_begin_nospec();					\
	switch (size) {							\
	case 1:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "q" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 2:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 4:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 8:								\
	{								\
		if (!IS_ENABLED(CONFIG_X86_64))				\
			__cmpxchg_wrong_size();				\
									\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__uaccess_end();						\
	*(uval) = __old;						\
	__ret;								\
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)		\
({									\
	access_ok((ptr), sizeof(*(ptr))) ?				\
		__user_atomic_cmpxchg_inatomic((uval), (ptr),		\
				(old), (new), sizeof(*(ptr))) :		\
		-EFAULT;						\
})
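/*
 * Usage sketch (editor's illustration; futex-style code is the natural
 * caller, and "uaddr", "old" and "new" are hypothetical):
 *
 *	u32 cur;
 *	int ret;
 *
 *	pagefault_disable();
 *	ret = user_atomic_cmpxchg_inatomic(&cur, uaddr, old, new);
 *	pagefault_enable();
 *
 * ret == 0 means the access succeeded and cur holds the value that was
 * found at uaddr (the swap happened iff cur == old); ret == -EFAULT means
 * the range check or the access itself failed.
 */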

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 *
 * Caller must use pagefault_enable/disable, or run in interrupt context,
 * and also do an access_ok() check.
 */
#define __copy_from_user_nmi __copy_from_user_inatomic

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return 0;
	__uaccess_begin_nospec();
	return 1;
}
#define user_access_begin(a, b)	user_access_begin(a, b)
#define user_access_end()	__uaccess_end()

#define user_access_save()	smap_save()
#define user_access_restore(x)	smap_restore(x)

#define unsafe_put_user(x, ptr, label)	\
	__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	int __gu_err;								\
	__inttype(*(ptr)) __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
	if (unlikely(__gu_err)) goto err_label;					\
} while (0)
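/*
 * Loop sketch (editor's illustration; example_sum() is hypothetical):
 * one user_access_begin()/user_access_end() pair brackets all of the
 * unsafe accesses, so STAC/CLAC run once rather than per element:
 *
 *	static int example_sum(const u32 __user *uarr, int n, u64 *sum)
 *	{
 *		u32 v;
 *		int i;
 *
 *		*sum = 0;
 *		if (!user_access_begin(uarr, n * sizeof(*uarr)))
 *			return -EFAULT;
 *		for (i = 0; i < n; i++) {
 *			unsafe_get_user(v, &uarr[i], Efault);
 *			*sum += v;
 *		}
 *		user_access_end();
 *		return 0;
 *	Efault:
 *		user_access_end();
 *		return -EFAULT;
 *	}
 */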

/*
 * We want the unsafe accessors to always be inlined and use
 * the error labels - thus the macro games.
 */
#define unsafe_copy_loop(dst, src, len, type, label)			\
	while (len >= sizeof(type)) {					\
		unsafe_put_user(*(type *)src, (type __user *)dst, label); \
		dst += sizeof(type);					\
		src += sizeof(type);					\
		len -= sizeof(type);					\
	}

#define unsafe_copy_to_user(_dst, _src, _len, label)			\
do {									\
	char __user *__ucu_dst = (_dst);				\
	const char *__ucu_src = (_src);					\
	size_t __ucu_len = (_len);					\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label);	\
} while (0)
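/*
 * Editor's note, for illustration: for _len == 15 the four
 * unsafe_copy_loop() passes emit one u64, one u32, one u16 and one u8
 * store (8 + 4 + 2 + 1 bytes), each with its own extable entry jumping
 * to "label", so arbitrary lengths are copied with a minimal number of
 * stores and no per-byte loop.
 */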

#endif /* _ASM_X86_UACCESS_H */