/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.addr_limit)
static inline void set_fs(mm_segment_t fs)
{
	current->thread.addr_limit = fs;
	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);
}
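
/*
 * Example (an illustrative sketch, not part of this header): the classic
 * pattern for temporarily lifting the address limit so that kernel
 * buffers can be passed to code expecting __user pointers.  Always
 * restore the old segment afterwards:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	// ... perform uaccess on a kernel-space buffer ...
 *	set_fs(old_fs);
 */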

#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max() (current->thread.addr_limit.seg)
#define __addr_ok(addr) \
	((unsigned long __force)(addr) < user_addr_max())

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return unlikely(addr > limit - size);

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (unlikely(addr < size))
		return true;
	return unlikely(addr > limit);
}

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit);	\
})

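/*
 * Worked example of the overflow case handled above (illustrative):
 * with a non-constant size, addr = 0xffffffffffffff00 and size = 0x200
 * wrap around so that addr + size == 0x100, which is < size; the
 * "addr < size" test catches exactly this wrap and rejects the range.
 */
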
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
# define WARN_ON_IN_IRQ()	WARN_ON_ONCE(!in_task())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok: - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(addr, size)					\
({								\
	WARN_ON_IN_IRQ();					\
	likely(!__range_not_ok(addr, size, user_addr_max()));	\
})

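/*
 * Example (an illustrative sketch; the handler and names are made up):
 * validate once with access_ok(), then use the unchecked __get_user()
 * variant for the actual transfer.
 *
 *	static long my_read_word(u32 __user *uarg)
 *	{
 *		u32 val;
 *
 *		if (!access_ok(uarg, sizeof(*uarg)))
 *			return -EFAULT;
 *		if (__get_user(val, uarg))
 *			return -EFAULT;
 *		return val;
 *	}
 */
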
/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec()	\
({					\
	stac();				\
	barrier_nospec();		\
})

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
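
/*
 * For example (informative): __inttype(char) and __inttype(u32) are both
 * unsigned long, while __inttype(u64) is unsigned long long on 32-bit
 * kernels (where sizeof(u64) > sizeof(0UL)) and unsigned long on 64-bit.
 */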

/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	asm volatile("call __get_user_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu),		\
		       ASM_CALL_CONSTRAINT				\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})

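/*
 * Example (an illustrative sketch; names are made up): get_user()
 * performs the address-limit check itself, so no access_ok() call is
 * needed beforehand.
 *
 *	int __user *uptr = ...;
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 */
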
#define __put_user_x(size, x, ptr, __ret_pu)				\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")

#ifdef CONFIG_X86_32
#define __put_user_goto_u64(x, addr, label)			\
	asm_volatile_goto("\n"					\
		"1:	movl %%eax,0(%1)\n"			\
		"2:	movl %%edx,4(%1)\n"			\
		_ASM_EXTABLE_UA(1b, %l2)			\
		_ASM_EXTABLE_UA(2b, %l2)			\
		: : "A" (x), "r" (addr)				\
		: : label)

#define __put_user_asm_ex_u64(x, addr)				\
	asm volatile("\n"					\
		     "1:	movl %%eax,0(%1)\n"		\
		     "2:	movl %%edx,4(%1)\n"		\
		     "3:\n"					\
		     _ASM_EXTABLE_EX(1b, 2b)			\
		     _ASM_EXTABLE_EX(2b, 3b)			\
		     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)				\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_goto_u64(x, ptr, label) \
	__put_user_goto(x, ptr, "q", "", "er", label)
#define __put_user_asm_ex_u64(x, addr)	\
	__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr)) __pu_val;				\
	__chk_user_ptr(ptr);					\
	might_fault();						\
	__pu_val = x;						\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		__put_user_x(1, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 8:							\
		__put_user_x8(__pu_val, ptr, __ret_pu);		\
		break;						\
	default:						\
		__put_user_x(X, __pu_val, ptr, __ret_pu);	\
		break;						\
	}							\
	__builtin_expect(__ret_pu, 0);				\
})

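/*
 * Example (an illustrative sketch; names are made up): writing a result
 * back to user space.  As with get_user(), the address check is built in.
 *
 *	if (put_user(val, uptr))
 *		return -EFAULT;
 */
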
#define __put_user_size(x, ptr, size, label)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_goto(x, ptr, "b", "b", "iq", label);		\
		break;							\
	case 2:								\
		__put_user_goto(x, ptr, "w", "w", "ir", label);		\
		break;							\
	case 4:								\
		__put_user_goto(x, ptr, "l", "k", "ir", label);		\
		break;							\
	case 8:								\
		__put_user_goto_u64((__typeof__(*ptr))(x), ptr, label);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __put_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm_ex(x, ptr, "b", "b", "iq");		\
		break;							\
	case 2:								\
		__put_user_asm_ex(x, ptr, "w", "w", "ir");		\
		break;							\
	case 4:								\
		__put_user_asm_ex(x, ptr, "l", "k", "ir");		\
		break;							\
	case 8:								\
		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)			\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1:	movl %2,%%eax\n"			\
		     "2:	movl %3,%%edx\n"			\
		     "3:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	mov %4,%0\n"				\
		     "	xorl %%eax,%%eax\n"				\
		     "	xorl %%edx,%%edx\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 4b)				\
		     _ASM_EXTABLE_UA(2b, 4b)				\
		     : "=r" (retval), "=&A"(x)				\
		     : "m" (__m(__ptr)), "m" __m(((u32 __user *)(__ptr)) + 1),	\
		       "i" (errret), "0" (retval));			\
})

#define __get_user_asm_ex_u64(x, ptr)	(x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
	 __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
	 __get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

#define __get_user_asm_nozero(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __get_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm_ex(x, ptr, "b", "b", "=q");		\
		break;							\
	case 2:								\
		__get_user_asm_ex(x, ptr, "w", "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm_ex(x, ptr, "l", "k", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_ex_u64(x, ptr);				\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	xor"itype" %"rtype"0,%"rtype"0\n"	\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_EX(1b, 3b)				\
		     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)			\
({								\
	__label__ __pu_label;					\
	int __pu_err = -EFAULT;					\
	__uaccess_begin();					\
	__put_user_size((x), (ptr), (size), __pu_label);	\
	__pu_err = 0;						\
__pu_label:							\
	__uaccess_end();					\
	__builtin_expect(__pu_err, 0);				\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__uaccess_begin_nospec();					\
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
	__uaccess_end();						\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__builtin_expect(__gu_err, 0);					\
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_goto(x, addr, itype, rtype, ltype, label)	\
	asm_volatile_goto("\n"					\
		"1:	mov"itype" %"rtype"0,%1\n"		\
		_ASM_EXTABLE_UA(1b, %l2)			\
		: : ltype(x), "m" (__m(addr))			\
		: : label)

#define __put_user_failed(x, addr, itype, rtype, ltype, errret)		\
	({	__label__ __puflab;					\
		int __pufret = errret;					\
		__put_user_goto(x, addr, itype, rtype, ltype, __puflab);	\
		__pufret = 0;						\
	__puflab: __pufret; })

#define __put_user_asm(x, addr, retval, itype, rtype, ltype, errret)	do {	\
	retval = __put_user_failed(x, addr, itype, rtype, ltype, errret);	\
} while (0)

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %"rtype"0,%1\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try	do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin();						\
	barrier();

#define uaccess_try_nospec do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin_nospec();

#define uaccess_catch(err)						\
	__uaccess_end();						\
	(err) |= (current->thread.uaccess_err ? -EFAULT : 0);		\
} while (0)

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */

#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

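/*
 * Example (an illustrative sketch; the structure and names are made up):
 * the __-prefixed variants amortize a single access_ok() over several
 * accesses to the same user-space object.
 *
 *	struct pair __user *up = ...;
 *	int a, b;
 *
 *	if (!access_ok(up, sizeof(*up)))
 *		return -EFAULT;
 *	if (__get_user(a, &up->first) || __get_user(b, &up->second))
 *		return -EFAULT;
 *	if (__put_user(a + b, &up->sum))
 *		return -EFAULT;
 */
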
/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *	get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try		uaccess_try_nospec
#define get_user_catch(err)	uaccess_catch(err)

#define get_user_ex(x, ptr)	do {					\
	unsigned long __gue_val;					\
	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
} while (0)

#define put_user_try		uaccess_try
#define put_user_catch(err)	uaccess_catch(err)

#define put_user_ex(x, ptr)						\
	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

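/*
 * Example (an illustrative sketch; the frame layout is made up): the
 * try/catch style batches several *_ex() accesses under one begin/end
 * region and accumulates faults into a single error, in the way the
 * signal-frame code uses it.
 *
 *	int err = 0;
 *	int a, b;
 *
 *	get_user_try {
 *		get_user_ex(a, &frame->a);
 *		get_user_ex(b, &frame->b);
 *	} get_user_catch(err);
 *
 *	if (err)
 *		return -EFAULT;
 */
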
extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");

#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)	\
({									\
	int __ret = 0;							\
	__typeof__(ptr) __uval = (uval);				\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	__uaccess_begin_nospec();					\
	switch (size) {							\
	case 1:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "q" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 2:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 4:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 8:								\
	{								\
		if (!IS_ENABLED(CONFIG_X86_64))				\
			__cmpxchg_wrong_size();				\
									\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov %3, %0\n"				\
			"\tjmp 2b\n"					\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__uaccess_end();						\
	*__uval = __old;						\
	__ret;								\
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)		\
({									\
	access_ok((ptr), sizeof(*(ptr))) ?				\
		__user_atomic_cmpxchg_inatomic((uval), (ptr),		\
				(old), (new), sizeof(*(ptr))) :		\
		-EFAULT;						\
})

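/*
 * Example (an illustrative sketch; names are made up): a futex-style
 * compare-and-swap on a user-space word.  A zero return means the access
 * succeeded; whether the swap actually happened is decided by comparing
 * the value read back with the expected one.
 *
 *	u32 cur;
 *
 *	if (user_atomic_cmpxchg_inatomic(&cur, uaddr, old, new))
 *		return -EFAULT;
 *	if (cur != old)
 *		goto retry;	// lost the race with another writer
 */
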
/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 *
 * Caller must use pagefault_enable/disable, or run in interrupt context,
 * and also do an access_ok() check.
 */
#define __copy_from_user_nmi __copy_from_user_inatomic

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return 0;
	__uaccess_begin();
	return 1;
}
#define user_access_begin(a, b)	user_access_begin(a, b)
#define user_access_end()	__uaccess_end()

#define unsafe_put_user(x, ptr, label)	\
	__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	int __gu_err;								\
	__inttype(*(ptr)) __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
	if (unlikely(__gu_err)) goto err_label;					\
} while (0)

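/*
 * Example (an illustrative sketch; names are made up): a tight copy loop
 * using the unsafe accessors under a single
 * user_access_begin()/user_access_end() pair.
 *
 *	if (!user_access_begin(uarr, n * sizeof(*uarr)))
 *		return -EFAULT;
 *	for (i = 0; i < n; i++)
 *		unsafe_get_user(tmp[i], &uarr[i], Efault);
 *	user_access_end();
 *	return 0;
 * Efault:
 *	user_access_end();
 *	return -EFAULT;
 */
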
#endif /* _ASM_X86_UACCESS_H */