Commit | Line | Data |
---|---|---|
0aea86a2 CM |
1 | /* |
2 | * Based on arch/arm/include/asm/uaccess.h | |
3 | * | |
4 | * Copyright (C) 2012 ARM Ltd. | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License version 2 as | |
8 | * published by the Free Software Foundation. | |
9 | * | |
10 | * This program is distributed in the hope that it will be useful, | |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
13 | * GNU General Public License for more details. | |
14 | * | |
15 | * You should have received a copy of the GNU General Public License | |
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | |
17 | */ | |
18 | #ifndef __ASM_UACCESS_H | |
19 | #define __ASM_UACCESS_H | |
20 | ||
bd38967d | 21 | #include <asm/alternative.h> |
4b65a5db | 22 | #include <asm/kernel-pgtable.h> |
bd38967d CM |
23 | #include <asm/sysreg.h> |
24 | ||
0aea86a2 CM |
25 | /* |
26 | * User space memory access functions | |
27 | */ | |
87261d19 | 28 | #include <linux/bitops.h> |
bffe1baf | 29 | #include <linux/kasan-checks.h> |
0aea86a2 | 30 | #include <linux/string.h> |
0aea86a2 | 31 | |
338d4f49 | 32 | #include <asm/cpufeature.h> |
0aea86a2 | 33 | #include <asm/ptrace.h> |
0aea86a2 CM |
34 | #include <asm/memory.h> |
35 | #include <asm/compiler.h> | |
46583939 | 36 | #include <asm/extable.h> |
0aea86a2 CM |
37 | |
/*
 * Address-limit segments.  KERNEL_DS (-1UL) makes every 64-bit address pass
 * __range_ok(); USER_DS restricts accesses to the user half of the address
 * space.  The current limit lives in thread_info and is read via get_fs().
 */
#define KERNEL_DS	(-1UL)
#define get_ds()	(KERNEL_DS)

#define USER_DS		TASK_SIZE_64
#define get_fs()	(current_thread_info()->addr_limit)
43 | ||
44 | static inline void set_fs(mm_segment_t fs) | |
45 | { | |
46 | current_thread_info()->addr_limit = fs; | |
57f4959b | 47 | |
cf7de27a TG |
48 | /* On user-mode return, check fs is correct */ |
49 | set_thread_flag(TIF_FSCHECK); | |
50 | ||
57f4959b JM |
51 | /* |
52 | * Enable/disable UAO so that copy_to_user() etc can access | |
53 | * kernel memory with the unprivileged instructions. | |
54 | */ | |
55 | if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS) | |
56 | asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO)); | |
57 | else | |
58 | asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO, | |
59 | CONFIG_ARM64_UAO)); | |
0aea86a2 CM |
60 | } |
61 | ||
/* Two segments compare equal iff their address limits are identical. */
#define segment_eq(a, b)	((a) == (b))
0aea86a2 | 63 | |
/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= current->addr_limit
 *
 * This needs 65-bit arithmetic: ADDS computes addr + size and sets the carry
 * flag on 64-bit overflow; CCMP compares the sum against addr_limit only when
 * no carry occurred (otherwise it forces a "fail" flag value); CSET then
 * materialises 1 when the sum is lower-or-same than the limit.
 */
#define __range_ok(addr, size)						\
({									\
	unsigned long __addr = (unsigned long)(addr);			\
	unsigned long flag, roksum;					\
	__chk_user_ptr(addr);						\
	asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls"		\
		: "=&r" (flag), "=&r" (roksum)				\
		: "1" (__addr), "Ir" (size),				\
		  "r" (current_thread_info()->addr_limit)		\
		: "cc");						\
	flag;								\
})
85 | ||
/*
 * When dealing with data aborts, watchpoints, or instruction traps we may end
 * up with a tagged userland pointer. Clear the tag to get a sane pointer to
 * pass on to access_ok(), for instance.  Sign-extending from bit 55 strips
 * the top-byte tag while preserving kernel (high) and user (low) addresses.
 */
#define untagged_addr(addr)	sign_extend64(addr, 55)
92 | ||
/* access_ok() ignores 'type' on arm64; both checks reduce to __range_ok(). */
#define access_ok(type, addr, size)	__range_ok(addr, size)
#define user_addr_max			get_fs
0aea86a2 | 95 | |
/*
 * Emit an exception-table entry mapping a faulting instruction ('from') to
 * its fixup handler ('to').  Entries are PC-relative 32-bit offsets so the
 * table stays valid regardless of where the kernel is loaded.
 */
#define _ASM_EXTABLE(from, to)						\
	"	.pushsection	__ex_table, \"a\"\n"			\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"
101 | ||
bd38967d CM |
102 | /* |
103 | * User access enabling/disabling. | |
104 | */ | |
4b65a5db CM |
105 | #ifdef CONFIG_ARM64_SW_TTBR0_PAN |
106 | static inline void __uaccess_ttbr0_disable(void) | |
107 | { | |
108 | unsigned long ttbr; | |
109 | ||
110 | /* reserved_ttbr0 placed at the end of swapper_pg_dir */ | |
111 | ttbr = read_sysreg(ttbr1_el1) + SWAPPER_DIR_SIZE; | |
112 | write_sysreg(ttbr, ttbr0_el1); | |
113 | isb(); | |
114 | } | |
115 | ||
116 | static inline void __uaccess_ttbr0_enable(void) | |
117 | { | |
118 | unsigned long flags; | |
119 | ||
120 | /* | |
121 | * Disable interrupts to avoid preemption between reading the 'ttbr0' | |
122 | * variable and the MSR. A context switch could trigger an ASID | |
123 | * roll-over and an update of 'ttbr0'. | |
124 | */ | |
125 | local_irq_save(flags); | |
126 | write_sysreg(current_thread_info()->ttbr0, ttbr0_el1); | |
127 | isb(); | |
128 | local_irq_restore(flags); | |
129 | } | |
130 | ||
131 | static inline bool uaccess_ttbr0_disable(void) | |
132 | { | |
133 | if (!system_uses_ttbr0_pan()) | |
134 | return false; | |
135 | __uaccess_ttbr0_disable(); | |
136 | return true; | |
137 | } | |
138 | ||
139 | static inline bool uaccess_ttbr0_enable(void) | |
140 | { | |
141 | if (!system_uses_ttbr0_pan()) | |
142 | return false; | |
143 | __uaccess_ttbr0_enable(); | |
144 | return true; | |
145 | } | |
146 | #else | |
147 | static inline bool uaccess_ttbr0_disable(void) | |
148 | { | |
149 | return false; | |
150 | } | |
151 | ||
152 | static inline bool uaccess_ttbr0_enable(void) | |
153 | { | |
154 | return false; | |
155 | } | |
156 | #endif | |
157 | ||
bd38967d CM |
158 | #define __uaccess_disable(alt) \ |
159 | do { \ | |
4b65a5db CM |
160 | if (!uaccess_ttbr0_disable()) \ |
161 | asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt, \ | |
162 | CONFIG_ARM64_PAN)); \ | |
bd38967d CM |
163 | } while (0) |
164 | ||
165 | #define __uaccess_enable(alt) \ | |
166 | do { \ | |
75037120 | 167 | if (!uaccess_ttbr0_enable()) \ |
4b65a5db CM |
168 | asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt, \ |
169 | CONFIG_ARM64_PAN)); \ | |
bd38967d CM |
170 | } while (0) |
171 | ||
172 | static inline void uaccess_disable(void) | |
173 | { | |
174 | __uaccess_disable(ARM64_HAS_PAN); | |
175 | } | |
176 | ||
177 | static inline void uaccess_enable(void) | |
178 | { | |
179 | __uaccess_enable(ARM64_HAS_PAN); | |
180 | } | |
181 | ||
182 | /* | |
183 | * These functions are no-ops when UAO is present. | |
184 | */ | |
185 | static inline void uaccess_disable_not_uao(void) | |
186 | { | |
187 | __uaccess_disable(ARM64_ALT_PAN_NOT_UAO); | |
188 | } | |
189 | ||
190 | static inline void uaccess_enable_not_uao(void) | |
191 | { | |
192 | __uaccess_enable(ARM64_ALT_PAN_NOT_UAO); | |
193 | } | |
194 | ||
/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 *
 * Single user load: 'instr' is the privileged form, 'alt_instr' the
 * unprivileged (ldtr*) form selected when 'feature' (UAO) is present.
 * On a fault the fixup at 3: sets err to -EFAULT and zeroes the result.
 */
#define __get_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr "	" reg "1, [%2]\n",			\
			alt_instr "	" reg "1, [%2]\n", feature)	\
	"2:\n"								\
	"	.section .fixup, \"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	mov	%1, #0\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err), "=&r" (x)						\
	: "r" (addr), "i" (-EFAULT))
217 | ||
/*
 * Fetch *(ptr) into 'x', sizing the load from sizeof(*(ptr)) and bracketing
 * it with uaccess enable/disable.  'err' becomes -EFAULT on a fault.
 */
#define __get_user_err(x, ptr, err)					\
do {									\
	unsigned long __gu_val;						\
	__chk_user_ptr(ptr);						\
	uaccess_enable_not_uao();					\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__get_user_asm("ldrh", "ldtrh", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__get_user_asm("ldr", "ldtr", "%w", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__get_user_asm("ldr", "ldtr", "%x", __gu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	uaccess_disable_not_uao();					\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
} while (0)
246 | ||
/* Unchecked user read: returns 0 on success, -EFAULT on fault. */
#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__get_user_err((x), (ptr), __gu_err);				\
	__gu_err;							\
})
253 | ||
/* As __get_user(), but accumulates the error into 'err' instead of returning it. */
#define __get_user_error(x, ptr, err)					\
({									\
	__get_user_err((x), (ptr), (err));				\
	(void)0;							\
})
259 | ||
/*
 * Checked user read: validates the range with access_ok() first, zeroing
 * 'x' and returning -EFAULT when the pointer is out of range.
 */
#define get_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	access_ok(VERIFY_READ, __p, sizeof(*__p)) ?			\
		__get_user((x), __p) :					\
		((x) = 0, -EFAULT);					\
})
268 | ||
/*
 * Single user store: 'instr' is the privileged form, 'alt_instr' the
 * unprivileged (sttr*) form selected when 'feature' (UAO) is present.
 * On a fault the fixup at 3: sets err to -EFAULT.
 */
#define __put_user_asm(instr, alt_instr, reg, x, addr, err, feature)	\
	asm volatile(							\
	"1:"ALTERNATIVE(instr "	" reg "1, [%2]\n",			\
			alt_instr "	" reg "1, [%2]\n", feature)	\
	"2:\n"								\
	"	.section .fixup,\"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err)							\
	: "r" (x), "r" (addr), "i" (-EFAULT))
282 | ||
/*
 * Store 'x' to *(ptr), sizing the store from sizeof(*(ptr)) and bracketing
 * it with uaccess enable/disable.  'err' becomes -EFAULT on a fault.
 */
#define __put_user_err(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__chk_user_ptr(ptr);						\
	uaccess_enable_not_uao();					\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 2:								\
		__put_user_asm("strh", "sttrh", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 4:								\
		__put_user_asm("str", "sttr", "%w", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	case 8:								\
		__put_user_asm("str", "sttr", "%x", __pu_val, (ptr),	\
			       (err), ARM64_HAS_UAO);			\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	uaccess_disable_not_uao();					\
} while (0)
310 | ||
/* Unchecked user write: returns 0 on success, -EFAULT on fault. */
#define __put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_err((x), (ptr), __pu_err);				\
	__pu_err;							\
})
317 | ||
/* As __put_user(), but accumulates the error into 'err' instead of returning it. */
#define __put_user_error(x, ptr, err)					\
({									\
	__put_user_err((x), (ptr), (err));				\
	(void)0;							\
})
323 | ||
/*
 * Checked user write: validates the range with access_ok() first and
 * returns -EFAULT when the pointer is out of range.
 */
#define put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ?			\
		__put_user((x), __p) :					\
		-EFAULT;						\
})
332 | ||
bffe1baf | 333 | extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n); |
92430dab | 334 | #define raw_copy_from_user __arch_copy_from_user |
bffe1baf | 335 | extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n); |
92430dab AV |
336 | #define raw_copy_to_user __arch_copy_to_user |
337 | extern unsigned long __must_check raw_copy_in_user(void __user *to, const void __user *from, unsigned long n); | |
0aea86a2 | 338 | extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n); |
92430dab AV |
339 | #define INLINE_COPY_TO_USER |
340 | #define INLINE_COPY_FROM_USER | |
0aea86a2 CM |
341 | |
342 | static inline unsigned long __must_check clear_user(void __user *to, unsigned long n) | |
343 | { | |
344 | if (access_ok(VERIFY_WRITE, to, n)) | |
345 | n = __clear_user(to, n); | |
346 | return n; | |
347 | } | |
348 | ||
12a0ef7b | 349 | extern long strncpy_from_user(char *dest, const char __user *src, long count); |
0aea86a2 | 350 | |
12a0ef7b | 351 | extern __must_check long strnlen_user(const char __user *str, long n); |
0aea86a2 | 352 | |
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
struct page;
void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len);
extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);

/*
 * Copy from user space with cache flushing of the destination (used for
 * persistent-memory writes).  KASAN is notified of the write explicitly
 * since the copy itself is done in assembly.
 */
static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, src, size);
}
#endif
364 | ||
0aea86a2 | 365 | #endif /* __ASM_UACCESS_H */ |