/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_64_H
#define _ASM_X86_UACCESS_64_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/lockdep.h>
#include <linux/kasan-checks.h>
#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/page.h>

#ifdef CONFIG_ADDRESS_MASKING
/*
 * Mask out tag bits from the address.
 */
static inline unsigned long __untagged_addr(unsigned long addr)
{
	/*
	 * Refer to tlbstate_untag_mask directly to avoid a RIP-relative
	 * relocation in the alternative instructions: the relocation would
	 * end up wrong once the instruction is copied to its target place.
	 */
27 "and %%gs:tlbstate_untag_mask, %[addr]\n\t", X86_FEATURE_LAM)
28 : [addr] "+r" (addr) : "m" (tlbstate_untag_mask));

#define untagged_addr(addr)	({					\
	unsigned long __addr = (__force unsigned long)(addr);		\
	(__force __typeof__(addr))__untagged_addr(__addr);		\
})

static inline unsigned long __untagged_addr_remote(struct mm_struct *mm,
						   unsigned long addr)
{
	mmap_assert_locked(mm);
	return addr & (mm)->context.untag_mask;
}

#define untagged_addr_remote(mm, addr)	({				\
	unsigned long __addr = (__force unsigned long)(addr);		\
	(__force __typeof__(addr))__untagged_addr_remote(mm, __addr);	\
})

#endif /* CONFIG_ADDRESS_MASKING */
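
/*
 * Usage sketch (illustrative only, not part of this header): with LAM
 * enabled, userspace may pass pointers carrying metadata in the upper
 * bits.  A hypothetical handler would strip the tag before doing any
 * range checks on the address:
 *
 *	void __user *uptr = (void __user *)arg;
 *
 *	uptr = untagged_addr(uptr);
 *	if (!access_ok(uptr, size))
 *		return -EFAULT;
 */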

/*
 * The virtual address space is logically divided into a kernel half
 * and a user half.  When cast to a signed type, user pointers are
 * positive and kernel pointers are negative.
 */
#define valid_user_address(x) ((long)(x) >= 0)
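
/*
 * Worked example (illustrative values): a user pointer such as
 * 0x00007fffffffe000 has the sign bit clear and passes the check,
 * while a kernel pointer such as 0xffff888000000000 is negative
 * when cast to 'long' and fails it.
 */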

/*
 * User pointers can have tag bits on x86-64.  This scheme tolerates
 * arbitrary values in those bits rather than masking them off.
 *
 * Enforce two rules:
 * 1. 'ptr' must be in the user half of the address space
 * 2. 'ptr+size' must not overflow into kernel addresses
 *
 * Note that addresses around the sign change are not valid addresses,
 * and will GP-fault even with LAM enabled if the sign bit is set (see
 * the "CR3.LAM_SUP" bit that can narrow the canonicality check if we
 * ever enable it, but not remove it entirely).
 *
 * So the "overflow into kernel addresses" does not imply some sudden
 * exact boundary at the sign bit, and we can allow a lot of slop on the
 * size check.
 *
 * In fact, we could probably remove the size check entirely, since
 * any kernel accesses will be in increasing address order starting
 * at 'ptr', and even if the end might be in kernel space, we'll
 * hit the GP faults for non-canonical accesses before we ever get
 * there.
 *
 * That's a separate optimization, for now just handle the small
 * constant case.
 */
static inline bool __access_ok(const void __user *ptr, unsigned long size)
{
	if (__builtin_constant_p(size <= PAGE_SIZE) && size <= PAGE_SIZE) {
		return valid_user_address(ptr);
	} else {
		unsigned long sum = size + (unsigned long)ptr;

		return valid_user_address(sum) && sum >= (unsigned long)ptr;
	}
}
#define __access_ok __access_ok
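
/*
 * Worked example (illustrative, assumed values): with a non-constant
 * size the overflow branch matters.  ptr = 0x1000, size = 1UL << 63
 * gives sum = 0x8000000000001000, which is negative as a 'long' and
 * is rejected; ptr = 0x7000, size = -1UL wraps 'sum' around to 0x6fff,
 * so the 'sum >= ptr' test rejects it as well.
 */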

/*
 * Copy To/From Userspace
 */

/* Handles exceptions in both to and from, but doesn't do access_ok */
__must_check unsigned long
rep_movs_alternative(void *to, const void *from, unsigned len);

static __always_inline __must_check unsigned long
copy_user_generic(void *to, const void *from, unsigned long len)
{
	stac();

	/*
	 * If the CPU has the FSRM feature, use a plain 'rep movsb'.
	 * Otherwise, call rep_movs_alternative.
	 */
	asm volatile(
		"1:\n\t"
		ALTERNATIVE("rep movsb",
			    "call rep_movs_alternative", ALT_NOT(X86_FEATURE_FSRM))
		"2:\n"
		_ASM_EXTABLE_UA(1b, 2b)
		:"+c" (len), "+D" (to), "+S" (from), ASM_CALL_CONSTRAINT
		: : "memory", "rax");
	clac();

	return len;
}

static __always_inline __must_check unsigned long
raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
{
	return copy_user_generic(dst, (__force void *)src, size);
}

static __always_inline __must_check unsigned long
raw_copy_to_user(void __user *dst, const void *src, unsigned long size)
{
	return copy_user_generic((__force void *)dst, src, size);
}
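
/*
 * Usage sketch (illustrative, hypothetical names): callers normally go
 * through the generic copy_from_user()/copy_to_user() wrappers, which
 * add access_ok() and hardened-usercopy checks around these helpers:
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, user_ptr, sizeof(karg)))
 *		return -EFAULT;	// nonzero: some bytes were left uncopied
 */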

extern long __copy_user_nocache(void *dst, const void __user *src, unsigned size);
extern long __copy_user_flushcache(void *dst, const void __user *src, unsigned size);

static inline int
__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
				  unsigned size)
{
	long ret;

	kasan_check_write(dst, size);
	stac();
	ret = __copy_user_nocache(dst, src, size);
	clac();

	return ret;
}

static inline int
__copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, src, size);
}
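
/*
 * Illustrative note (assumed context): the _nocache and _flushcache
 * variants exist for destinations that must not linger dirty in the
 * CPU cache, e.g. DAX/persistent-memory write paths; the flushcache
 * variant additionally flushes any cachelines written by ordinary
 * stores during the copy.
 */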

/*
 * Zero Userspace.
 */

__must_check unsigned long
rep_stos_alternative(void __user *addr, unsigned long len);

static __always_inline __must_check unsigned long __clear_user(void __user *addr, unsigned long size)
{
	might_fault();
	stac();

	/*
	 * No memory constraint: the 'rep stosb' doesn't change any
	 * memory gcc knows about.
	 */
	asm volatile(
		"1:\n\t"
		ALTERNATIVE("rep stosb",
			    "call rep_stos_alternative", ALT_NOT(X86_FEATURE_FSRS))
		"2:\n"
		_ASM_EXTABLE_UA(1b, 2b)
		: "+c" (size), "+D" (addr), ASM_CALL_CONSTRAINT
		: "a" (0));

	clac();

	return size;
}

static __always_inline unsigned long clear_user(void __user *to, unsigned long n)
{
	if (__access_ok(to, n))
		return __clear_user(to, n);
	return n;
}
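
/*
 * Usage sketch (illustrative, hypothetical names): zero a user buffer
 * and treat any unwritten remainder as a fault:
 *
 *	if (clear_user(ubuf, len))	// returns the number of bytes NOT zeroed
 *		return -EFAULT;
 */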

#endif /* _ASM_X86_UACCESS_64_H */