/*
 * Based on arch/arm/include/asm/uaccess.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H
#include <asm/alternative.h>
#include <asm/kernel-pgtable.h>
#include <asm/sysreg.h>

/*
 * User space memory access functions
 */
#include <linux/bitops.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>

#include <asm/cpufeature.h>
#include <asm/ptrace.h>
#include <asm/memory.h>
#include <asm/extable.h>
#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;

	/*
	 * Prevent a mispredicted conditional call to set_fs from forwarding
	 * the wrong address limit to access_ok under speculation.
	 */
	spec_bar();

	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);
	/*
	 * Enable/disable UAO so that copy_to_user() etc can access
	 * kernel memory with the unprivileged instructions.
	 */
	if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
	else
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
				CONFIG_ARM64_UAO));
}

#define segment_eq(a, b)	((a) == (b))
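
/*
 * Illustrative sketch (not part of this header): the traditional pattern a
 * caller would use to temporarily lift the address limit so that the uaccess
 * routines can operate on kernel buffers.  The function name is made up for
 * the example:
 *
 *	static void example_kernel_uaccess(void)
 *	{
 *		mm_segment_t old_fs = get_fs();
 *
 *		set_fs(KERNEL_DS);
 *		... uaccess routines may now be pointed at kernel addresses ...
 *		set_fs(old_fs);
 *	}
 */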
/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
 */
static inline unsigned long __range_ok(const void __user *addr, unsigned long size)
{
	unsigned long ret, limit = current_thread_info()->addr_limit;

	__chk_user_ptr(addr);
	asm volatile(
	// A + B <= C + 1 for all A,B,C, in four easy steps:
	// 1: X = A + B; X' = X % 2^64
	"	adds	%0, %3, %2\n"
	// 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
	"	csel	%1, xzr, %1, hi\n"
	// 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
	//    to compensate for the carry flag being set in step 4. For
	//    X > 2^64, X' merely has to remain nonzero, which it does.
	"	csinv	%0, %0, xzr, cc\n"
	// 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1
	//    comes from the carry in being clear. Otherwise, we are
	//    testing X' - C == 0, subject to the previous adjustments.
	"	sbcs	xzr, %0, %1\n"
	"	cset	%0, ls\n"
	: "=&r" (ret), "+r" (limit) : "Ir" (size), "0" (addr) : "cc");

	return ret;
}
#define access_ok(addr, size)	__range_ok(addr, size)
#define user_addr_max		get_fs
#define _ASM_EXTABLE(from, to)						\
	"	.pushsection	__ex_table, \"a\"\n"			\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"
/*
 * User access enabling/disabling.
 */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void __uaccess_ttbr0_disable(void)
{
	unsigned long flags, ttbr;

	local_irq_save(flags);
	ttbr = read_sysreg(ttbr1_el1);
	ttbr &= ~TTBR_ASID_MASK;
	/* reserved_ttbr0 placed before swapper_pg_dir */
	write_sysreg(ttbr - RESERVED_TTBR0_SIZE, ttbr0_el1);
	isb();
	/* Set reserved ASID */
	write_sysreg(ttbr, ttbr1_el1);
	isb();
	local_irq_restore(flags);
}
static inline void __uaccess_ttbr0_enable(void)
{
	unsigned long flags, ttbr0, ttbr1;

	/*
	 * Disable interrupts to avoid preemption between reading the 'ttbr0'
	 * variable and the MSR. A context switch could trigger an ASID
	 * roll-over and an update of 'ttbr0'.
	 */
	local_irq_save(flags);
	ttbr0 = READ_ONCE(current_thread_info()->ttbr0);

	/* Restore active ASID */
	ttbr1 = read_sysreg(ttbr1_el1);
	ttbr1 &= ~TTBR_ASID_MASK;		/* safety measure */
	ttbr1 |= ttbr0 & TTBR_ASID_MASK;
	write_sysreg(ttbr1, ttbr1_el1);
	isb();

	/* Restore user page table */
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
	local_irq_restore(flags);
}
static inline bool uaccess_ttbr0_disable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_disable();
	return true;
}

static inline bool uaccess_ttbr0_enable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_enable();
	return true;
}
#else
static inline bool uaccess_ttbr0_disable(void)
{
	return false;
}

static inline bool uaccess_ttbr0_enable(void)
{
	return false;
}
#endif
static inline void __uaccess_disable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}

static inline void __uaccess_enable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}
#define __uaccess_disable(alt)						\
do {									\
	if (!uaccess_ttbr0_disable())					\
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt,		\
				CONFIG_ARM64_PAN));			\
} while (0)

#define __uaccess_enable(alt)						\
do {									\
	if (!uaccess_ttbr0_enable())					\
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt,		\
				CONFIG_ARM64_PAN));			\
} while (0)
static inline void uaccess_disable(void)
{
	__uaccess_disable(ARM64_HAS_PAN);
}

static inline void uaccess_enable(void)
{
	__uaccess_enable(ARM64_HAS_PAN);
}

/*
 * These functions are no-ops when UAO is present.
 */
static inline void uaccess_disable_not_uao(void)
{
	__uaccess_disable(ARM64_ALT_PAN_NOT_UAO);
}

static inline void uaccess_enable_not_uao(void)
{
	__uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
}
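
/*
 * Illustrative sketch (not part of this header): the enable/disable helpers
 * above are intended to bracket a run of user accesses, in the same way the
 * accessors below use them.  On UAO-capable CPUs the *_not_uao variants patch
 * down to NOPs:
 *
 *	uaccess_enable_not_uao();
 *	... unprivileged (LDTR/STTR) accesses to user memory ...
 *	uaccess_disable_not_uao();
 */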
/*
 * Sanitise a uaccess pointer such that it becomes NULL if above the
 * current addr_limit.
 */
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
{
	void __user *safe_ptr;

	asm volatile(
	"	bics	xzr, %1, %2\n"
	"	csel	%0, %1, xzr, eq\n"
	: "=&r" (safe_ptr)
	: "r" (ptr), "r" (current_thread_info()->addr_limit)
	: "cc");

	csdb();
	return safe_ptr;
}
/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 */
#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr "     " reg "1, [%2]\n",			\
			alt_instr " " reg "1, [%2]\n", feature)		\
	"2:\n"								\
	"	.section .fixup, \"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	mov	%1, #0\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err), "=&r" (x)						\
	: "r" (addr), "i" (-EFAULT))
#define __get_user_err(x, ptr, err)					\
do {									\
	unsigned long __gu_val;						\
	__chk_user_ptr(ptr);						\
	uaccess_enable_not_uao();					\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__get_user_asm("ldr", "ldtr", "%x", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	uaccess_disable_not_uao();					\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
} while (0)
#define __get_user_check(x, ptr, err)					\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__p = uaccess_mask_ptr(__p);				\
		__get_user_err((x), __p, (err));			\
	} else {							\
		(x) = 0; (err) = -EFAULT;				\
	}								\
})
#define __get_user_error(x, ptr, err)					\
({									\
	__get_user_check((x), (ptr), (err));				\
	(void)0;							\
})

#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__get_user_check((x), (ptr), __gu_err);				\
	__gu_err;							\
})

#define get_user	__get_user
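
/*
 * Illustrative usage sketch (not part of this header; names are made up):
 * get_user() performs the access_ok() check, pointer masking and fault
 * handling internally and evaluates to 0 on success or -EFAULT on failure:
 *
 *	static int example_read_u32(const u32 __user *uptr, u32 *out)
 *	{
 *		u32 val;
 *
 *		if (get_user(val, uptr))
 *			return -EFAULT;
 *		*out = val;
 *		return 0;
 *	}
 */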
#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr "     " reg "1, [%2]\n",			\
			alt_instr " " reg "1, [%2]\n", feature)		\
	"2:\n"								\
	"	.section .fixup,\"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err)							\
	: "r" (x), "r" (addr), "i" (-EFAULT))
#define __put_user_err(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__chk_user_ptr(ptr);						\
	uaccess_enable_not_uao();					\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__put_user_asm("str", "sttr", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__put_user_asm("str", "sttr", "%x", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	uaccess_disable_not_uao();					\
} while (0)
#define __put_user_check(x, ptr, err)					\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__p = uaccess_mask_ptr(__p);				\
		__put_user_err((x), __p, (err));			\
	} else {							\
		(err) = -EFAULT;					\
	}								\
})
#define __put_user_error(x, ptr, err)					\
({									\
	__put_user_check((x), (ptr), (err));				\
	(void)0;							\
})

#define __put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_check((x), (ptr), __pu_err);				\
	__pu_err;							\
})

#define put_user	__put_user
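
/*
 * The store side follows the same shape (illustrative sketch, made-up names):
 * put_user() evaluates to 0 on success or -EFAULT on a faulting or
 * out-of-range user address:
 *
 *	static int example_write_u32(u32 __user *uptr, u32 val)
 *	{
 *		return put_user(val, uptr) ? -EFAULT : 0;
 *	}
 */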
extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
#define raw_copy_from_user(to, from, n)					\
({									\
	__arch_copy_from_user((to), __uaccess_mask_ptr(from), (n));	\
})

extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
#define raw_copy_to_user(to, from, n)					\
({									\
	__arch_copy_to_user(__uaccess_mask_ptr(to), (from), (n));	\
})

extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
#define raw_copy_in_user(to, from, n)					\
({									\
	__arch_copy_in_user(__uaccess_mask_ptr(to),			\
			    __uaccess_mask_ptr(from), (n));		\
})
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
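
/*
 * Illustrative sketch (not part of this header; names are made up): the
 * generic copy_{from,to}_user() wrappers built on the raw_copy_*() helpers
 * above return the number of bytes that could not be copied, so zero means
 * success:
 *
 *	struct example_args args;
 *
 *	if (copy_from_user(&args, uargs, sizeof(args)))
 *		return -EFAULT;
 */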
extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n))
		n = __arch_clear_user(__uaccess_mask_ptr(to), n);
	return n;
}
#define clear_user	__clear_user
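
/*
 * Example (illustrative): clear_user() likewise returns the number of bytes
 * it failed to zero, so a non-zero return is usually turned into -EFAULT:
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */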
extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
struct page;
void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len);
extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);

static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size);
}
#endif

#endif /* __ASM_UACCESS_H */