Remove 'type' argument from access_ok() function
[linux-2.6-block.git] arch/x86/include/asm/uaccess.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(TASK_SIZE_MAX)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current->thread.addr_limit)
static inline void set_fs(mm_segment_t fs)
{
	current->thread.addr_limit = fs;
	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);
}

#define segment_eq(a, b)	((a).seg == (b).seg)

#define user_addr_max()	(current->thread.addr_limit.seg)
#define __addr_ok(addr)						\
	((unsigned long __force)(addr) < user_addr_max())

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return unlikely(addr > limit - size);

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (unlikely(addr < size))
		return true;
	return unlikely(addr > limit);
}

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})
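
/*
 * A worked example of the overflow handling above, with hypothetical
 * 32-bit values: for addr = 0xfffffff8 and size = 0x10, "addr += size"
 * wraps around to 0x8, so a plain "addr > limit" test would wrongly
 * accept the range; the "addr < size" test (0x8 < 0x10) catches the
 * wraparound and rejects it.
 */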

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
# define WARN_ON_IN_IRQ()	WARN_ON_ONCE(!in_task())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok: - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(addr, size)						\
({									\
	WARN_ON_IN_IRQ();						\
	likely(!__range_not_ok(addr, size, user_addr_max()));		\
})
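
/*
 * Example usage, a minimal sketch ("arg", "uaddr" and "val" are
 * hypothetical names, as in a syscall that validates a pointer argument
 * once before using the unchecked accessors):
 *
 *	u32 __user *uaddr = (u32 __user *)arg;
 *	u32 val;
 *
 *	if (!access_ok(uaddr, sizeof(*uaddr)))
 *		return -EFAULT;
 *	return __get_user(val, uaddr);
 */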

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 */

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec()	\
({					\
	stac();				\
	barrier_nospec();		\
})

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __inttype(x) \
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
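
/*
 * For example, on 32-bit x86 __inttype(u64) is unsigned long long while
 * __inttype(int) is unsigned long; on 64-bit, everything up to 8 bytes
 * fits in unsigned long.
 */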

/**
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define get_user(x, ptr)						\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	might_fault();							\
	asm volatile("call __get_user_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu),		\
			ASM_CALL_CONSTRAINT				\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})
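
/*
 * Example usage, a minimal sketch ("uaddr" is a hypothetical __user
 * pointer taken from a syscall argument; no prior access_ok() is
 * needed, get_user() checks internally):
 *
 *	int val;
 *
 *	if (get_user(val, (int __user *)uaddr))
 *		return -EFAULT;
 */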

#define __put_user_x(size, x, ptr, __ret_pu)			\
	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")



#ifdef CONFIG_X86_32
#define __put_user_asm_u64(x, addr, err, errret)			\
	asm volatile("\n"						\
		     "1:	movl %%eax,0(%2)\n"			\
		     "2:	movl %%edx,4(%2)\n"			\
		     "3:"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	movl %3,%0\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 4b)				\
		     _ASM_EXTABLE_UA(2b, 4b)				\
		     : "=r" (err)					\
		     : "A" (x), "r" (addr), "i" (errret), "0" (err))

#define __put_user_asm_ex_u64(x, addr)					\
	asm volatile("\n"						\
		     "1:	movl %%eax,0(%1)\n"			\
		     "2:	movl %%edx,4(%1)\n"			\
		     "3:"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     _ASM_EXTABLE_EX(2b, 3b)				\
		     : : "A" (x), "r" (addr))

#define __put_user_x8(x, ptr, __ret_pu)				\
	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
#else
#define __put_user_asm_u64(x, ptr, retval, errret) \
	__put_user_asm(x, ptr, retval, "q", "", "er", errret)
#define __put_user_asm_ex_u64(x, addr)	\
	__put_user_asm_ex(x, addr, "q", "", "er")
#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %eax. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);

/**
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr)					\
({								\
	int __ret_pu;						\
	__typeof__(*(ptr)) __pu_val;				\
	__chk_user_ptr(ptr);					\
	might_fault();						\
	__pu_val = x;						\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		__put_user_x(1, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 2:							\
		__put_user_x(2, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 4:							\
		__put_user_x(4, __pu_val, ptr, __ret_pu);	\
		break;						\
	case 8:							\
		__put_user_x8(__pu_val, ptr, __ret_pu);		\
		break;						\
	default:						\
		__put_user_x(X, __pu_val, ptr, __ret_pu);	\
		break;						\
	}							\
	__builtin_expect(__ret_pu, 0);				\
})
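
/*
 * Example usage, a minimal sketch ("uaddr" is a hypothetical __user
 * destination; like get_user(), put_user() needs no prior access_ok()):
 *
 *	if (put_user(42, (int __user *)uaddr))
 *		return -EFAULT;
 */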

#define __put_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret);	\
		break;							\
	case 2:								\
		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret);	\
		break;							\
	case 4:								\
		__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);	\
		break;							\
	case 8:								\
		__put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval,	\
				   errret);				\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __put_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_asm_ex(x, ptr, "b", "b", "iq");		\
		break;							\
	case 2:								\
		__put_user_asm_ex(x, ptr, "w", "w", "ir");		\
		break;							\
	case 4:								\
		__put_user_asm_ex(x, ptr, "l", "k", "ir");		\
		break;							\
	case 8:								\
		__put_user_asm_ex_u64((__typeof__(*ptr))(x), ptr);	\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval, errret)			\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1:	movl %2,%%eax\n"			\
		     "2:	movl %3,%%edx\n"			\
		     "3:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4:	mov %4,%0\n"				\
		     "	xorl %%eax,%%eax\n"				\
		     "	xorl %%edx,%%edx\n"				\
		     "	jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 4b)				\
		     _ASM_EXTABLE_UA(2b, 4b)				\
		     : "=r" (retval), "=&A"(x)				\
		     : "m" (__m(__ptr)), "m" __m(((u32 __user *)(__ptr)) + 1), \
		       "i" (errret), "0" (retval));			\
})

#define __get_user_asm_ex_u64(x, ptr)			(x) = __get_user_bad()
#else
#define __get_user_asm_u64(x, ptr, retval, errret) \
	 __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
#define __get_user_asm_ex_u64(x, ptr) \
	 __get_user_asm_ex(x, ptr, "q", "", "=r")
#endif

#define __get_user_size(x, ptr, size, retval, errret)			\
do {									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval, errret);		\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

#define __get_user_asm_nozero(x, addr, err, itype, rtype, ltype, errret) \
	asm volatile("\n"						\
		     "1:	mov"itype" %2,%"rtype"1\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : "=r" (err), ltype(x)				\
		     : "m" (__m(addr)), "i" (errret), "0" (err))

/*
 * This doesn't do __uaccess_begin/end - the exception handling
 * around it must do that.
 */
#define __get_user_size_ex(x, ptr, size)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm_ex(x, ptr, "b", "b", "=q");		\
		break;							\
	case 2:								\
		__get_user_asm_ex(x, ptr, "w", "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm_ex(x, ptr, "l", "k", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_ex_u64(x, ptr);				\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %1,%"rtype"0\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:xor"itype" %"rtype"0,%"rtype"0\n"		\
		     "  jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_EX(1b, 3b)				\
		     : ltype(x) : "m" (__m(addr)))

#define __put_user_nocheck(x, ptr, size)			\
({								\
	int __pu_err;						\
	__uaccess_begin();					\
	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT);	\
	__uaccess_end();					\
	__builtin_expect(__pu_err, 0);				\
})

#define __get_user_nocheck(x, ptr, size)				\
({									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__uaccess_begin_nospec();					\
	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
	__uaccess_end();						\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	__builtin_expect(__gu_err, 0);					\
})

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
	asm volatile("\n"						\
		     "1:	mov"itype" %"rtype"1,%2\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:	mov %3,%0\n"				\
		     "	jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : "=r"(err)					\
		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))

#define __put_user_asm_ex(x, addr, itype, rtype, ltype)			\
	asm volatile("1:	mov"itype" %"rtype"0,%1\n"		\
		     "2:\n"						\
		     _ASM_EXTABLE_EX(1b, 2b)				\
		     : : ltype(x), "m" (__m(addr)))

/*
 * uaccess_try and catch
 */
#define uaccess_try	do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin();						\
	barrier();

#define uaccess_try_nospec do {						\
	current->thread.uaccess_err = 0;				\
	__uaccess_begin_nospec();					\

#define uaccess_catch(err)						\
	__uaccess_end();						\
	(err) |= (current->thread.uaccess_err ? -EFAULT : 0);		\
} while (0)

/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */

#define __get_user(x, ptr)						\
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */

#define __put_user(x, ptr)						\
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

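/*
 * Example of the access_ok() + __get_user()/__put_user() pattern for
 * several accesses to one user buffer (a sketch; "uarg", "request" and
 * "reply" are hypothetical):
 *
 *	if (!access_ok(uarg, sizeof(*uarg)))
 *		return -EFAULT;
 *	if (__get_user(req, &uarg->request) ||
 *	    __put_user(rep, &uarg->reply))
 *		return -EFAULT;
 */
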
/*
 * {get|put}_user_try and catch
 *
 * get_user_try {
 *	get_user_ex(...);
 * } get_user_catch(err)
 */
#define get_user_try		uaccess_try_nospec
#define get_user_catch(err)	uaccess_catch(err)

#define get_user_ex(x, ptr)	do {					\
	unsigned long __gue_val;					\
	__get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr))));	\
	(x) = (__force __typeof__(*(ptr)))__gue_val;			\
} while (0)

#define put_user_try		uaccess_try
#define put_user_catch(err)	uaccess_catch(err)

#define put_user_ex(x, ptr)						\
	__put_user_size_ex((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

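/*
 * put_user_try mirrors the get_user_try sketch above ("val", "uptr" and
 * "field" are hypothetical):
 *
 *	put_user_try {
 *		put_user_ex(val, &uptr->field);
 *	} put_user_catch(err)
 */
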
extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");

#define __user_atomic_cmpxchg_inatomic(uval, ptr, old, new, size)	\
({									\
	int __ret = 0;							\
	__typeof__(ptr) __uval = (uval);				\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	__uaccess_begin_nospec();					\
	switch (size) {							\
	case 1:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgb %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "q" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 2:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgw %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 4:								\
	{								\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	case 8:								\
	{								\
		if (!IS_ENABLED(CONFIG_X86_64))				\
			__cmpxchg_wrong_size();				\
									\
		asm volatile("\n"					\
			"1:\t" LOCK_PREFIX "cmpxchgq %4, %2\n"		\
			"2:\n"						\
			"\t.section .fixup, \"ax\"\n"			\
			"3:\tmov     %3, %0\n"				\
			"\tjmp     2b\n"				\
			"\t.previous\n"					\
			_ASM_EXTABLE_UA(1b, 3b)				\
			: "+r" (__ret), "=a" (__old), "+m" (*(ptr))	\
			: "i" (-EFAULT), "r" (__new), "1" (__old)	\
			: "memory"					\
		);							\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__uaccess_end();						\
	*__uval = __old;						\
	__ret;								\
})

#define user_atomic_cmpxchg_inatomic(uval, ptr, old, new)		\
({									\
	access_ok((ptr), sizeof(*(ptr))) ?				\
		__user_atomic_cmpxchg_inatomic((uval), (ptr),		\
				(old), (new), sizeof(*(ptr))) :		\
		-EFAULT;						\
})
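
/*
 * Example usage, a minimal sketch ("uaddr", "expected" and "new" are
 * hypothetical): replace *uaddr with "new" only if it still holds
 * "expected"; on return, "cur" holds the value actually observed, and
 * the return value only reports -EFAULT, not whether the exchange won:
 *
 *	u32 cur;
 *	int ret = user_atomic_cmpxchg_inatomic(&cur, uaddr, expected, new);
 *
 *	if (ret)
 *		return ret;
 *	if (cur != expected)
 *		goto retry;
 */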

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 *
 * Caller must use pagefault_enable/disable, or run in interrupt context,
 * and also do an access_ok() check.
 */
#define __copy_from_user_nmi __copy_from_user_inatomic

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
#define user_access_begin()	__uaccess_begin()
#define user_access_end()	__uaccess_end()

#define unsafe_put_user(x, ptr, err_label)					\
do {										\
	int __pu_err;								\
	__typeof__(*(ptr)) __pu_val = (x);					\
	__put_user_size(__pu_val, (ptr), sizeof(*(ptr)), __pu_err, -EFAULT);	\
	if (unlikely(__pu_err)) goto err_label;					\
} while (0)

#define unsafe_get_user(x, ptr, err_label)					\
do {										\
	int __gu_err;								\
	__inttype(*(ptr)) __gu_val;						\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
	if (unlikely(__gu_err)) goto err_label;					\
} while (0)

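/*
 * Example usage, a minimal sketch ("uaddr", "val" and the "efault" label
 * are hypothetical): increment a user-space counter with one
 * begin/end pair around both accesses:
 *
 *	if (!access_ok(uaddr, sizeof(u32)))
 *		return -EFAULT;
 *	user_access_begin();
 *	unsafe_get_user(val, uaddr, efault);
 *	unsafe_put_user(val + 1, uaddr, efault);
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */
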
#endif /* _ASM_X86_UACCESS_H */