// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/fault.c
 *
 * Copyright (C) 1995  Linus Torvalds
 * Copyright (C) 1995-2004 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/extable.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>
#include <linux/preempt.h>
#include <linux/hugetlb.h>

#include <asm/acpi.h>
#include <asm/bug.h>
#include <asm/cmpxchg.h>
#include <asm/cpufeature.h>
#include <asm/exception.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kprobes.h>
#include <asm/processor.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

struct fault_info {
        int (*fn)(unsigned long addr, unsigned int esr,
                  struct pt_regs *regs);
        int sig;
        int code;
        const char *name;
};

static const struct fault_info fault_info[];
static struct fault_info debug_fault_info[];

static inline const struct fault_info *esr_to_fault_info(unsigned int esr)
{
        return fault_info + (esr & ESR_ELx_FSC);
}
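
/*
 * ESR_ELx_FSC masks the low six fault status code bits of the ESR, so
 * esr_to_fault_info() above indexes directly into the 64-entry fault_info[]
 * table at the end of this file: for example, a level 2 translation fault
 * (FSC 0b000110) selects entry 6, which is handled by do_translation_fault().
 */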

static inline const struct fault_info *esr_to_debug_fault_info(unsigned int esr)
{
        return debug_fault_info + DBG_ESR_EVT(esr);
}

static void data_abort_decode(unsigned int esr)
{
        pr_alert("Data abort info:\n");

        if (esr & ESR_ELx_ISV) {
                pr_alert("  Access size = %u byte(s)\n",
                         1U << ((esr & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT));
                pr_alert("  SSE = %lu, SRT = %lu\n",
                         (esr & ESR_ELx_SSE) >> ESR_ELx_SSE_SHIFT,
                         (esr & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT);
                pr_alert("  SF = %lu, AR = %lu\n",
                         (esr & ESR_ELx_SF) >> ESR_ELx_SF_SHIFT,
                         (esr & ESR_ELx_AR) >> ESR_ELx_AR_SHIFT);
        } else {
                pr_alert("  ISV = 0, ISS = 0x%08lx\n", esr & ESR_ELx_ISS_MASK);
        }

        pr_alert("  CM = %lu, WnR = %lu\n",
                 (esr & ESR_ELx_CM) >> ESR_ELx_CM_SHIFT,
                 (esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT);
}

static void mem_abort_decode(unsigned int esr)
{
        pr_alert("Mem abort info:\n");

        pr_alert("  ESR = 0x%08x\n", esr);
        pr_alert("  EC = 0x%02lx: %s, IL = %u bits\n",
                 ESR_ELx_EC(esr), esr_get_class_string(esr),
                 (esr & ESR_ELx_IL) ? 32 : 16);
        pr_alert("  SET = %lu, FnV = %lu\n",
                 (esr & ESR_ELx_SET_MASK) >> ESR_ELx_SET_SHIFT,
                 (esr & ESR_ELx_FnV) >> ESR_ELx_FnV_SHIFT);
        pr_alert("  EA = %lu, S1PTW = %lu\n",
                 (esr & ESR_ELx_EA) >> ESR_ELx_EA_SHIFT,
                 (esr & ESR_ELx_S1PTW) >> ESR_ELx_S1PTW_SHIFT);

        if (esr_is_data_abort(esr))
                data_abort_decode(esr);
}

static inline unsigned long mm_to_pgd_phys(struct mm_struct *mm)
{
        /* Either init_pg_dir or swapper_pg_dir */
        if (mm == &init_mm)
                return __pa_symbol(mm->pgd);

        return (unsigned long)virt_to_phys(mm->pgd);
}
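
/*
 * Note on mm_to_pgd_phys() above: init_mm's pgd (init_pg_dir or
 * swapper_pg_dir) is a kernel image symbol rather than a linear-map address,
 * so it is translated with __pa_symbol(); user pgds are linear-map
 * allocations, for which plain virt_to_phys() is the right conversion.
 */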

/*
 * Dump out the page tables associated with 'addr' in the currently active mm.
 */
static void show_pte(unsigned long addr)
{
        struct mm_struct *mm;
        pgd_t *pgdp;
        pgd_t pgd;

        if (is_ttbr0_addr(addr)) {
                /* TTBR0 */
                mm = current->active_mm;
                if (mm == &init_mm) {
                        pr_alert("[%016lx] user address but active_mm is swapper\n",
                                 addr);
                        return;
                }
        } else if (is_ttbr1_addr(addr)) {
                /* TTBR1 */
                mm = &init_mm;
        } else {
                pr_alert("[%016lx] address between user and kernel address ranges\n",
                         addr);
                return;
        }

        pr_alert("%s pgtable: %luk pages, %llu-bit VAs, pgdp=%016lx\n",
                 mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
                 vabits_actual, mm_to_pgd_phys(mm));
        pgdp = pgd_offset(mm, addr);
        pgd = READ_ONCE(*pgdp);
        pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));

        do {
                pud_t *pudp, pud;
                pmd_t *pmdp, pmd;
                pte_t *ptep, pte;

                if (pgd_none(pgd) || pgd_bad(pgd))
                        break;

                pudp = pud_offset(pgdp, addr);
                pud = READ_ONCE(*pudp);
                pr_cont(", pud=%016llx", pud_val(pud));
                if (pud_none(pud) || pud_bad(pud))
                        break;

                pmdp = pmd_offset(pudp, addr);
                pmd = READ_ONCE(*pmdp);
                pr_cont(", pmd=%016llx", pmd_val(pmd));
                if (pmd_none(pmd) || pmd_bad(pmd))
                        break;

                ptep = pte_offset_map(pmdp, addr);
                pte = READ_ONCE(*ptep);
                pr_cont(", pte=%016llx", pte_val(pte));
                pte_unmap(ptep);
        } while(0);

        pr_cont("\n");
}
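
/*
 * show_pte() walks on READ_ONCE() copies and stops at the first level that
 * is none/bad; the do { ... } while(0) block exists only so that "break" can
 * terminate the walk early once an invalid entry has been printed.
 */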

/*
 * This function sets the access flags (dirty, accessed), as well as write
 * permission, and only to a more permissive setting.
 *
 * It needs to cope with hardware update of the accessed/dirty state by other
 * agents in the system and can safely skip the __sync_icache_dcache() call as,
 * like set_pte_at(), the PTE is never changed from no-exec to exec here.
 *
 * Returns whether or not the PTE actually changed.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pte_t *ptep,
                          pte_t entry, int dirty)
{
        pteval_t old_pteval, pteval;
        pte_t pte = READ_ONCE(*ptep);

        if (pte_same(pte, entry))
                return 0;

        /* only preserve the access flags and write permission */
        pte_val(entry) &= PTE_RDONLY | PTE_AF | PTE_WRITE | PTE_DIRTY;

        /*
         * Setting the flags must be done atomically to avoid racing with the
         * hardware update of the access/dirty state. The PTE_RDONLY bit must
         * be set to the most permissive (lowest value) of *ptep and entry
         * (calculated as: a & b == ~(~a | ~b)).
         */
        pte_val(entry) ^= PTE_RDONLY;
        pteval = pte_val(pte);
        do {
                old_pteval = pteval;
                pteval ^= PTE_RDONLY;
                pteval |= pte_val(entry);
                pteval ^= PTE_RDONLY;
                pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
        } while (pteval != old_pteval);

        flush_tlb_fix_spurious_fault(vma, address);
        return 1;
}
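
/*
 * Worked example of the PTE_RDONLY handling above: suppose the stored PTE has
 * PTE_RDONLY clear (hardware already made it writable/dirty) while "entry"
 * has PTE_RDONLY set. Inverting both copies of the bit, OR-ing, and inverting
 * back computes old & new for that bit (~(~a | ~b)), so the result keeps
 * PTE_RDONLY clear - the more permissive of the two - while every other flag
 * is accumulated with a plain OR.
 */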

static bool is_el1_instruction_abort(unsigned int esr)
{
        return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
}

static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr,
                                           struct pt_regs *regs)
{
        unsigned int ec       = ESR_ELx_EC(esr);
        unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;

        if (ec != ESR_ELx_EC_DABT_CUR && ec != ESR_ELx_EC_IABT_CUR)
                return false;

        if (fsc_type == ESR_ELx_FSC_PERM)
                return true;

        if (is_ttbr0_addr(addr) && system_uses_ttbr0_pan())
                return fsc_type == ESR_ELx_FSC_FAULT &&
                        (regs->pstate & PSR_PAN_BIT);

        return false;
}

static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr,
                                                        unsigned int esr,
                                                        struct pt_regs *regs)
{
        unsigned long flags;
        u64 par, dfsc;

        if (ESR_ELx_EC(esr) != ESR_ELx_EC_DABT_CUR ||
            (esr & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT)
                return false;

        local_irq_save(flags);
        asm volatile("at s1e1r, %0" :: "r" (addr));
        isb();
        par = read_sysreg(par_el1);
        local_irq_restore(flags);

        /*
         * If we now have a valid translation, treat the translation fault as
         * spurious.
         */
        if (!(par & SYS_PAR_EL1_F))
                return true;

        /*
         * If we got a different type of fault from the AT instruction,
         * treat the translation fault as spurious.
         */
        dfsc = FIELD_GET(SYS_PAR_EL1_FST, par);
        return (dfsc & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT;
}
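
/*
 * Background for the function above: AT S1E1R performs a stage 1 EL1 read
 * translation of "addr" and reports the result in PAR_EL1. The F bit
 * (SYS_PAR_EL1_F) is clear on success; on failure the FST field holds a
 * fault status code in the same format as the ESR FSC, which is why it can
 * be compared against ESR_ELx_FSC_FAULT.
 */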

static void die_kernel_fault(const char *msg, unsigned long addr,
                             unsigned int esr, struct pt_regs *regs)
{
        bust_spinlocks(1);

        pr_alert("Unable to handle kernel %s at virtual address %016lx\n", msg,
                 addr);

        mem_abort_decode(esr);

        show_pte(addr);
        die("Oops", regs, esr);
        bust_spinlocks(0);
        do_exit(SIGKILL);
}

static void __do_kernel_fault(unsigned long addr, unsigned int esr,
                              struct pt_regs *regs)
{
        const char *msg;

        /*
         * Are we prepared to handle this kernel fault?
         * We are almost certainly not prepared to handle instruction faults.
         */
        if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
                return;

        if (WARN_RATELIMIT(is_spurious_el1_translation_fault(addr, esr, regs),
            "Ignoring spurious kernel translation fault at virtual address %016lx\n", addr))
                return;

        if (is_el1_permission_fault(addr, esr, regs)) {
                if (esr & ESR_ELx_WNR)
                        msg = "write to read-only memory";
                else if (is_el1_instruction_abort(esr))
                        msg = "execute from non-executable memory";
                else
                        msg = "read from unreadable memory";
        } else if (addr < PAGE_SIZE) {
                msg = "NULL pointer dereference";
        } else {
                msg = "paging request";
        }

        die_kernel_fault(msg, addr, esr, regs);
}

static void set_thread_esr(unsigned long address, unsigned int esr)
{
        current->thread.fault_address = address;

        /*
         * If the faulting address is in the kernel, we must sanitize the ESR.
         * From userspace's point of view, kernel-only mappings don't exist
         * at all, so we report them as level 0 translation faults.
         * (This is not quite the way that "no mapping there at all" behaves:
         * an alignment fault not caused by the memory type would take
         * precedence over translation fault for a real access to empty
         * space. Unfortunately we can't easily distinguish "alignment fault
         * not caused by memory type" from "alignment fault caused by memory
         * type", so we ignore this wrinkle and just return the translation
         * fault.)
         */
        if (!is_ttbr0_addr(current->thread.fault_address)) {
                switch (ESR_ELx_EC(esr)) {
                case ESR_ELx_EC_DABT_LOW:
                        /*
                         * These bits provide only information about the
                         * faulting instruction, which userspace knows already.
                         * We explicitly clear bits which are architecturally
                         * RES0 in case they are given meanings in future.
                         * We always report the ESR as if the fault was taken
                         * to EL1 and so ISV and the bits in ISS[23:14] are
                         * clear. (In fact it always will be a fault to EL1.)
                         */
                        esr &= ESR_ELx_EC_MASK | ESR_ELx_IL |
                                ESR_ELx_CM | ESR_ELx_WNR;
                        esr |= ESR_ELx_FSC_FAULT;
                        break;
                case ESR_ELx_EC_IABT_LOW:
                        /*
                         * Claim a level 0 translation fault.
                         * All other bits are architecturally RES0 for faults
                         * reported with that DFSC value, so we clear them.
                         */
                        esr &= ESR_ELx_EC_MASK | ESR_ELx_IL;
                        esr |= ESR_ELx_FSC_FAULT;
                        break;
                default:
                        /*
                         * This should never happen (entry.S only brings us
                         * into this code for insn and data aborts from a lower
                         * exception level). Fail safe by not providing an ESR
                         * context record at all.
                         */
                        WARN(1, "ESR 0x%x is not DABT or IABT from EL0\n", esr);
                        esr = 0;
                        break;
                }
        }

        current->thread.fault_code = esr;
}

static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
        /*
         * If we are in kernel mode at this point, we have no context to
         * handle this fault with.
         */
        if (user_mode(regs)) {
                const struct fault_info *inf = esr_to_fault_info(esr);

                set_thread_esr(addr, esr);
                arm64_force_sig_fault(inf->sig, inf->code, (void __user *)addr,
                                      inf->name);
        } else {
                __do_kernel_fault(addr, esr, regs);
        }
}

#define VM_FAULT_BADMAP         0x010000
#define VM_FAULT_BADACCESS      0x020000

static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
                                  unsigned int mm_flags, unsigned long vm_flags)
{
        struct vm_area_struct *vma = find_vma(mm, addr);

        if (unlikely(!vma))
                return VM_FAULT_BADMAP;

        /*
         * Ok, we have a good vm_area for this memory access, so we can handle
         * it.
         */
        if (unlikely(vma->vm_start > addr)) {
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        return VM_FAULT_BADMAP;
                if (expand_stack(vma, addr))
                        return VM_FAULT_BADMAP;
        }

        /*
         * Check that the permissions on the VMA allow for the fault which
         * occurred.
         */
        if (!(vma->vm_flags & vm_flags))
                return VM_FAULT_BADACCESS;
        return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags);
}

static bool is_el0_instruction_abort(unsigned int esr)
{
        return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW;
}

/*
 * Note: not valid for EL1 DC IVAC, but we never use it in a way that should
 * fault. EL0 cannot issue DC IVAC (undef).
 */
static bool is_write_abort(unsigned int esr)
{
        return (esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM);
}
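
/*
 * The ESR_ELx_CM check above matters because cache maintenance operations by
 * VA (e.g. DC CVAU issued from userspace) are reported with WnR = 1 even
 * though they only require read access to the VMA, so they must not be
 * treated as write faults.
 */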

static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
                                   struct pt_regs *regs)
{
        const struct fault_info *inf;
        struct mm_struct *mm = current->mm;
        vm_fault_t fault, major = 0;
        unsigned long vm_flags = VM_ACCESS_FLAGS;
        unsigned int mm_flags = FAULT_FLAG_DEFAULT;

        if (kprobe_page_fault(regs, esr))
                return 0;

        /*
         * If we're in an interrupt or have no user context, we must not take
         * the fault.
         */
        if (faulthandler_disabled() || !mm)
                goto no_context;

        if (user_mode(regs))
                mm_flags |= FAULT_FLAG_USER;

        if (is_el0_instruction_abort(esr)) {
                vm_flags = VM_EXEC;
                mm_flags |= FAULT_FLAG_INSTRUCTION;
        } else if (is_write_abort(esr)) {
                vm_flags = VM_WRITE;
                mm_flags |= FAULT_FLAG_WRITE;
        }

        if (is_ttbr0_addr(addr) && is_el1_permission_fault(addr, esr, regs)) {
                /* regs->orig_addr_limit may be 0 if we entered from EL0 */
                if (regs->orig_addr_limit == KERNEL_DS)
                        die_kernel_fault("access to user memory with fs=KERNEL_DS",
                                         addr, esr, regs);

                if (is_el1_instruction_abort(esr))
                        die_kernel_fault("execution of user memory",
                                         addr, esr, regs);

                if (!search_exception_tables(regs->pc))
                        die_kernel_fault("access to user memory outside uaccess routines",
                                         addr, esr, regs);
        }

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

        /*
         * As per x86, we may deadlock here. However, since the kernel only
         * validly references user space from well defined areas of the code,
         * we can bug out early if this is from code which shouldn't.
         */
        if (!down_read_trylock(&mm->mmap_sem)) {
                if (!user_mode(regs) && !search_exception_tables(regs->pc))
                        goto no_context;
retry:
                down_read(&mm->mmap_sem);
        } else {
                /*
                 * The above down_read_trylock() might have succeeded in which
                 * case, we'll have missed the might_sleep() from down_read().
                 */
                might_sleep();
#ifdef CONFIG_DEBUG_VM
                if (!user_mode(regs) && !search_exception_tables(regs->pc)) {
                        up_read(&mm->mmap_sem);
                        goto no_context;
                }
#endif
        }

        fault = __do_page_fault(mm, addr, mm_flags, vm_flags);
        major |= fault & VM_FAULT_MAJOR;

        /* Quick path to respond to signals */
        if (fault_signal_pending(fault, regs)) {
                if (!user_mode(regs))
                        goto no_context;
                return 0;
        }

        if (fault & VM_FAULT_RETRY) {
                if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
                        mm_flags |= FAULT_FLAG_TRIED;
                        goto retry;
                }
        }
        up_read(&mm->mmap_sem);

        /*
         * Handle the "normal" (no error) case first.
         */
        if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
                              VM_FAULT_BADACCESS)))) {
                /*
                 * Major/minor page fault accounting is only done
                 * once. If we go through a retry, it is extremely
                 * likely that the page will be found in page cache at
                 * that point.
                 */
                if (major) {
                        current->maj_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
                                      addr);
                } else {
                        current->min_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs,
                                      addr);
                }

                return 0;
        }

        /*
         * If we are in kernel mode at this point, we have no context to
         * handle this fault with.
         */
        if (!user_mode(regs))
                goto no_context;

        if (fault & VM_FAULT_OOM) {
                /*
                 * We ran out of memory, call the OOM killer, and return to
                 * userspace (which will retry the fault, or kill us if we got
                 * oom-killed).
                 */
                pagefault_out_of_memory();
                return 0;
        }

        inf = esr_to_fault_info(esr);
        set_thread_esr(addr, esr);
        if (fault & VM_FAULT_SIGBUS) {
                /*
                 * We had some memory, but were unable to successfully fix up
                 * this page fault.
                 */
                arm64_force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)addr,
                                      inf->name);
        } else if (fault & (VM_FAULT_HWPOISON_LARGE | VM_FAULT_HWPOISON)) {
                unsigned int lsb;

                lsb = PAGE_SHIFT;
                if (fault & VM_FAULT_HWPOISON_LARGE)
                        lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));

                arm64_force_sig_mceerr(BUS_MCEERR_AR, (void __user *)addr, lsb,
                                       inf->name);
        } else {
                /*
                 * Something tried to access memory that isn't in our memory
                 * map.
                 */
                arm64_force_sig_fault(SIGSEGV,
                                      fault == VM_FAULT_BADACCESS ? SEGV_ACCERR : SEGV_MAPERR,
                                      (void __user *)addr,
                                      inf->name);
        }

        return 0;

no_context:
        __do_kernel_fault(addr, esr, regs);
        return 0;
}
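
/*
 * Note on the retry path above: when handle_mm_fault() returns VM_FAULT_RETRY
 * with FAULT_FLAG_ALLOW_RETRY set, the core mm code has already dropped
 * mmap_sem, which is why "goto retry" jumps back to the down_read() rather
 * than straight into __do_page_fault().
 */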

static int __kprobes do_translation_fault(unsigned long addr,
                                          unsigned int esr,
                                          struct pt_regs *regs)
{
        if (is_ttbr0_addr(addr))
                return do_page_fault(addr, esr, regs);

        do_bad_area(addr, esr, regs);
        return 0;
}

static int do_alignment_fault(unsigned long addr, unsigned int esr,
                              struct pt_regs *regs)
{
        do_bad_area(addr, esr, regs);
        return 0;
}

static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
        return 1; /* "fault" */
}

static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
        const struct fault_info *inf;
        void __user *siaddr;

        inf = esr_to_fault_info(esr);

        /*
         * Return value ignored as we rely on signal merging.
         * Future patches will make this more robust.
         */
        apei_claim_sea(regs);

        if (esr & ESR_ELx_FnV)
                siaddr = NULL;
        else
                siaddr = (void __user *)addr;
        arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr);

        return 0;
}

static const struct fault_info fault_info[] = {
        { do_bad,               SIGKILL, SI_KERNEL,  "ttbr address size fault" },
        { do_bad,               SIGKILL, SI_KERNEL,  "level 1 address size fault" },
        { do_bad,               SIGKILL, SI_KERNEL,  "level 2 address size fault" },
        { do_bad,               SIGKILL, SI_KERNEL,  "level 3 address size fault" },
        { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 0 translation fault" },
        { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" },
        { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
        { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
        { do_bad,               SIGKILL, SI_KERNEL,  "unknown 8" },
        { do_page_fault,        SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
        { do_page_fault,        SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
        { do_page_fault,        SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" },
        { do_bad,               SIGKILL, SI_KERNEL,  "unknown 12" },
        { do_page_fault,        SIGSEGV, SEGV_ACCERR, "level 1 permission fault" },
        { do_page_fault,        SIGSEGV, SEGV_ACCERR, "level 2 permission fault" },
        { do_page_fault,        SIGSEGV, SEGV_ACCERR, "level 3 permission fault" },
        { do_sea,               SIGBUS,  BUS_OBJERR, "synchronous external abort" },
        { do_bad,               SIGKILL, SI_KERNEL,  "unknown 17" },
        { do_bad,               SIGKILL, SI_KERNEL,  "unknown 18" },
        { do_bad,               SIGKILL, SI_KERNEL,  "unknown 19" },
        { do_sea,               SIGKILL, SI_KERNEL,  "level 0 (translation table walk)" },
        { do_sea,               SIGKILL, SI_KERNEL,  "level 1 (translation table walk)" },
        { do_sea,               SIGKILL, SI_KERNEL,  "level 2 (translation table walk)" },
        { do_sea,               SIGKILL, SI_KERNEL,  "level 3 (translation table walk)" },
        { do_sea,               SIGBUS,  BUS_OBJERR, "synchronous parity or ECC error" },       // Reserved when RAS is implemented
        { do_bad,               SIGKILL, SI_KERNEL,  "unknown 25" },
        { do_bad,               SIGKILL, SI_KERNEL,  "unknown 26" },
        { do_bad,               SIGKILL, SI_KERNEL,  "unknown 27" },
        { do_sea,               SIGKILL, SI_KERNEL,  "level 0 synchronous parity error (translation table walk)" },    // Reserved when RAS is implemented
        { do_sea,               SIGKILL, SI_KERNEL,  "level 1 synchronous parity error (translation table walk)" },    // Reserved when RAS is implemented
        { do_sea,               SIGKILL, SI_KERNEL,  "level 2 synchronous parity error (translation table walk)" },    // Reserved when RAS is implemented
        { do_sea,               SIGKILL, SI_KERNEL,  "level 3 synchronous parity error (translation table walk)" },    // Reserved when RAS is implemented
        { do_bad,               SIGKILL, SI_KERNEL,  "unknown 32" },
        { do_alignment_fault,   SIGBUS,  BUS_ADRALN, "alignment fault" },
        { do_bad,               SIGKILL, SI_KERNEL,  "unknown 34" },
        { do_bad,               SIGKILL, SI_KERNEL,  "unknown 35" },
        { do_bad,               SIGKILL, SI_KERNEL,  "unknown 36" },
        { do_bad,               SIGKILL, SI_KERNEL,  "unknown 37" },
        { do_bad,               SIGKILL, SI_KERNEL,  "unknown 38" },
        { do_bad,               SIGKILL, SI_KERNEL,  "unknown 39" },
        { do_bad,               SIGKILL, SI_KERNEL,  "unknown 40" },
        { do_bad,               SIGKILL, SI_KERNEL,  "unknown 41" },
        { do_bad,               SIGKILL, SI_KERNEL,  "unknown 42" },
        { do_bad,               SIGKILL, SI_KERNEL,  "unknown 43" },
        { do_bad,               SIGKILL, SI_KERNEL,  "unknown 44" },
        { do_bad,               SIGKILL, SI_KERNEL,  "unknown 45" },
        { do_bad,               SIGKILL, SI_KERNEL,  "unknown 46" },
        { do_bad,               SIGKILL, SI_KERNEL,  "unknown 47" },
        { do_bad,               SIGKILL, SI_KERNEL,  "TLB conflict abort" },
        { do_bad,               SIGKILL, SI_KERNEL,  "Unsupported atomic hardware update fault" },
        { do_bad,               SIGKILL, SI_KERNEL,  "unknown 50" },
        { do_bad,               SIGKILL, SI_KERNEL,  "unknown 51" },
        { do_bad,               SIGKILL, SI_KERNEL,  "implementation fault (lockdown abort)" },
        { do_bad,               SIGBUS,  BUS_OBJERR, "implementation fault (unsupported exclusive)" },
        { do_bad,               SIGKILL, SI_KERNEL,  "unknown 54" },
        { do_bad,               SIGKILL, SI_KERNEL,  "unknown 55" },
        { do_bad,               SIGKILL, SI_KERNEL,  "unknown 56" },
        { do_bad,               SIGKILL, SI_KERNEL,  "unknown 57" },
        { do_bad,               SIGKILL, SI_KERNEL,  "unknown 58" },
        { do_bad,               SIGKILL, SI_KERNEL,  "unknown 59" },
        { do_bad,               SIGKILL, SI_KERNEL,  "unknown 60" },
        { do_bad,               SIGKILL, SI_KERNEL,  "section domain fault" },
        { do_bad,               SIGKILL, SI_KERNEL,  "page domain fault" },
        { do_bad,               SIGKILL, SI_KERNEL,  "unknown 63" },
};

void do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
        const struct fault_info *inf = esr_to_fault_info(esr);

        if (!inf->fn(addr, esr, regs))
                return;

        if (!user_mode(regs)) {
                pr_alert("Unhandled fault at 0x%016lx\n", addr);
                mem_abort_decode(esr);
                show_pte(addr);
        }

        arm64_notify_die(inf->name, regs,
                         inf->sig, inf->code, (void __user *)addr, esr);
}
NOKPROBE_SYMBOL(do_mem_abort);

void do_el0_irq_bp_hardening(void)
{
        /* PC has already been checked in entry.S */
        arm64_apply_bp_hardening();
}
NOKPROBE_SYMBOL(do_el0_irq_bp_hardening);

void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
        arm64_notify_die("SP/PC alignment exception", regs,
                         SIGBUS, BUS_ADRALN, (void __user *)addr, esr);
}
NOKPROBE_SYMBOL(do_sp_pc_abort);

int __init early_brk64(unsigned long addr, unsigned int esr,
                       struct pt_regs *regs);

/*
 * __refdata because early_brk64 is __init, but the reference to it is
 * clobbered at arch_initcall time.
 * See traps.c and debug-monitors.c:debug_traps_init().
 */
static struct fault_info __refdata debug_fault_info[] = {
        { do_bad,       SIGTRAP, TRAP_HWBKPT, "hardware breakpoint" },
        { do_bad,       SIGTRAP, TRAP_HWBKPT, "hardware single-step" },
        { do_bad,       SIGTRAP, TRAP_HWBKPT, "hardware watchpoint" },
        { do_bad,       SIGKILL, SI_KERNEL,   "unknown 3" },
        { do_bad,       SIGTRAP, TRAP_BRKPT,  "aarch32 BKPT" },
        { do_bad,       SIGKILL, SI_KERNEL,   "aarch32 vector catch" },
        { early_brk64,  SIGTRAP, TRAP_BRKPT,  "aarch64 BRK" },
        { do_bad,       SIGKILL, SI_KERNEL,   "unknown 7" },
};

void __init hook_debug_fault_code(int nr,
                                  int (*fn)(unsigned long, unsigned int, struct pt_regs *),
                                  int sig, int code, const char *name)
{
        BUG_ON(nr < 0 || nr >= ARRAY_SIZE(debug_fault_info));

        debug_fault_info[nr].fn         = fn;
        debug_fault_info[nr].sig        = sig;
        debug_fault_info[nr].code       = code;
        debug_fault_info[nr].name       = name;
}

/*
 * In debug exception context, we explicitly disable preemption despite
 * having interrupts disabled.
 * This serves two purposes: it makes it much less likely that we would
 * accidentally schedule in exception context and it will force a warning
 * if we somehow manage to schedule by accident.
 */
static void debug_exception_enter(struct pt_regs *regs)
{
        /*
         * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
         * already disabled to preserve the last enabled/disabled addresses.
         */
        if (interrupts_enabled(regs))
                trace_hardirqs_off();

        if (user_mode(regs)) {
                RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
        } else {
                /*
                 * We might have interrupted pretty much anything. In
                 * fact, if we're a debug exception, we can even interrupt
                 * NMI processing. We don't want this code to make in_nmi()
                 * return true, but we need to notify RCU.
                 */
                rcu_nmi_enter();
        }

        preempt_disable();

        /* This code is a bit fragile. Test it. */
        RCU_LOCKDEP_WARN(!rcu_is_watching(), "exception_enter didn't work");
}
NOKPROBE_SYMBOL(debug_exception_enter);

static void debug_exception_exit(struct pt_regs *regs)
{
        preempt_enable_no_resched();

        if (!user_mode(regs))
                rcu_nmi_exit();

        if (interrupts_enabled(regs))
                trace_hardirqs_on();
}
NOKPROBE_SYMBOL(debug_exception_exit);

#ifdef CONFIG_ARM64_ERRATUM_1463225
DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

static int cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
        if (user_mode(regs))
                return 0;

        if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
                return 0;

        /*
         * We've taken a dummy step exception from the kernel to ensure
         * that interrupts are re-enabled on the syscall path. Return back
         * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
         * masked so that we can safely restore the mdscr and get on with
         * handling the syscall.
         */
        regs->pstate |= PSR_D_BIT;
        return 1;
}
#else
static int cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
        return 0;
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */
NOKPROBE_SYMBOL(cortex_a76_erratum_1463225_debug_handler);

void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
                        struct pt_regs *regs)
{
        const struct fault_info *inf = esr_to_debug_fault_info(esr);
        unsigned long pc = instruction_pointer(regs);

        if (cortex_a76_erratum_1463225_debug_handler(regs))
                return;

        debug_exception_enter(regs);

        if (user_mode(regs) && !is_ttbr0_addr(pc))
                arm64_apply_bp_hardening();

        if (inf->fn(addr_if_watchpoint, esr, regs)) {
                arm64_notify_die(inf->name, regs,
                                 inf->sig, inf->code, (void __user *)pc, esr);
        }

        debug_exception_exit(regs);
}
NOKPROBE_SYMBOL(do_debug_exception);