// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/fault.c
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1995-2004 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/extable.h>
#include <linux/kfence.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/page-flags.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/highmem.h>
#include <linux/perf_event.h>
#include <linux/preempt.h>
#include <linux/hugetlb.h>

#include <asm/acpi.h>
#include <asm/bug.h>
#include <asm/cmpxchg.h>
#include <asm/cpufeature.h>
#include <asm/efi.h>
#include <asm/exception.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kprobes.h>
#include <asm/mte.h>
#include <asm/processor.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

struct fault_info {
        int (*fn)(unsigned long far, unsigned long esr,
                  struct pt_regs *regs);
        int sig;
        int code;
        const char *name;
};

static const struct fault_info fault_info[];
static struct fault_info debug_fault_info[];

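/*
 * The fault status code (FSC) occupies the low six bits of the ESR and
 * indexes directly into the 64-entry fault_info[] table below.
 */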
static inline const struct fault_info *esr_to_fault_info(unsigned long esr)
{
        return fault_info + (esr & ESR_ELx_FSC);
}

static inline const struct fault_info *esr_to_debug_fault_info(unsigned long esr)
{
        return debug_fault_info + DBG_ESR_EVT(esr);
}

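/*
 * Decode the data-abort specific ISS fields. The instruction-specific
 * syndrome (access size, register, sign extension) is only valid when the
 * ISV bit is set.
 */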
static void data_abort_decode(unsigned long esr)
{
        unsigned long iss2 = ESR_ELx_ISS2(esr);

        pr_alert("Data abort info:\n");

        if (esr & ESR_ELx_ISV) {
                pr_alert("  Access size = %u byte(s)\n",
                         1U << ((esr & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT));
                pr_alert("  SSE = %lu, SRT = %lu\n",
                         (esr & ESR_ELx_SSE) >> ESR_ELx_SSE_SHIFT,
                         (esr & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT);
                pr_alert("  SF = %lu, AR = %lu\n",
                         (esr & ESR_ELx_SF) >> ESR_ELx_SF_SHIFT,
                         (esr & ESR_ELx_AR) >> ESR_ELx_AR_SHIFT);
        } else {
                pr_alert("  ISV = 0, ISS = 0x%08lx, ISS2 = 0x%08lx\n",
                         esr & ESR_ELx_ISS_MASK, iss2);
        }

        pr_alert("  CM = %lu, WnR = %lu, TnD = %lu, TagAccess = %lu\n",
                 (esr & ESR_ELx_CM) >> ESR_ELx_CM_SHIFT,
                 (esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT,
                 (iss2 & ESR_ELx_TnD) >> ESR_ELx_TnD_SHIFT,
                 (iss2 & ESR_ELx_TagAccess) >> ESR_ELx_TagAccess_SHIFT);

        pr_alert("  GCS = %ld, Overlay = %lu, DirtyBit = %lu, Xs = %llu\n",
                 (iss2 & ESR_ELx_GCS) >> ESR_ELx_GCS_SHIFT,
                 (iss2 & ESR_ELx_Overlay) >> ESR_ELx_Overlay_SHIFT,
                 (iss2 & ESR_ELx_DirtyBit) >> ESR_ELx_DirtyBit_SHIFT,
                 (iss2 & ESR_ELx_Xs_MASK) >> ESR_ELx_Xs_SHIFT);
}

static void mem_abort_decode(unsigned long esr)
{
        pr_alert("Mem abort info:\n");

        pr_alert("  ESR = 0x%016lx\n", esr);
        pr_alert("  EC = 0x%02lx: %s, IL = %u bits\n",
                 ESR_ELx_EC(esr), esr_get_class_string(esr),
                 (esr & ESR_ELx_IL) ? 32 : 16);
        pr_alert("  SET = %lu, FnV = %lu\n",
                 (esr & ESR_ELx_SET_MASK) >> ESR_ELx_SET_SHIFT,
                 (esr & ESR_ELx_FnV) >> ESR_ELx_FnV_SHIFT);
        pr_alert("  EA = %lu, S1PTW = %lu\n",
                 (esr & ESR_ELx_EA) >> ESR_ELx_EA_SHIFT,
                 (esr & ESR_ELx_S1PTW) >> ESR_ELx_S1PTW_SHIFT);
        pr_alert("  FSC = 0x%02lx: %s\n", (esr & ESR_ELx_FSC),
                 esr_to_fault_info(esr)->name);

        if (esr_is_data_abort(esr))
                data_abort_decode(esr);
}

static inline unsigned long mm_to_pgd_phys(struct mm_struct *mm)
{
        /* Either init_pg_dir or swapper_pg_dir */
        if (mm == &init_mm)
                return __pa_symbol(mm->pgd);

        return (unsigned long)virt_to_phys(mm->pgd);
}

/*
 * Dump out the page tables associated with 'addr' in the currently active mm.
 */
static void show_pte(unsigned long addr)
{
        struct mm_struct *mm;
        pgd_t *pgdp;
        pgd_t pgd;

        if (is_ttbr0_addr(addr)) {
                /* TTBR0 */
                mm = current->active_mm;
                if (mm == &init_mm) {
                        pr_alert("[%016lx] user address but active_mm is swapper\n",
                                 addr);
                        return;
                }
        } else if (is_ttbr1_addr(addr)) {
                /* TTBR1 */
                mm = &init_mm;
        } else {
                pr_alert("[%016lx] address between user and kernel address ranges\n",
                         addr);
                return;
        }

        pr_alert("%s pgtable: %luk pages, %llu-bit VAs, pgdp=%016lx\n",
                 mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
                 vabits_actual, mm_to_pgd_phys(mm));
        pgdp = pgd_offset(mm, addr);
        pgd = READ_ONCE(*pgdp);
        pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));

        do {
                p4d_t *p4dp, p4d;
                pud_t *pudp, pud;
                pmd_t *pmdp, pmd;
                pte_t *ptep, pte;

                if (pgd_none(pgd) || pgd_bad(pgd))
                        break;

                p4dp = p4d_offset(pgdp, addr);
                p4d = READ_ONCE(*p4dp);
                pr_cont(", p4d=%016llx", p4d_val(p4d));
                if (p4d_none(p4d) || p4d_bad(p4d))
                        break;

                pudp = pud_offset(p4dp, addr);
                pud = READ_ONCE(*pudp);
                pr_cont(", pud=%016llx", pud_val(pud));
                if (pud_none(pud) || pud_bad(pud))
                        break;

                pmdp = pmd_offset(pudp, addr);
                pmd = READ_ONCE(*pmdp);
                pr_cont(", pmd=%016llx", pmd_val(pmd));
                if (pmd_none(pmd) || pmd_bad(pmd))
                        break;

                ptep = pte_offset_map(pmdp, addr);
                pte = READ_ONCE(*ptep);
                pr_cont(", pte=%016llx", pte_val(pte));
                pte_unmap(ptep);
        } while(0);

        pr_cont("\n");
}

/*
 * This function sets the access flags (dirty, accessed), as well as write
 * permission, and only to a more permissive setting.
 *
 * It needs to cope with hardware update of the accessed/dirty state by other
 * agents in the system and can safely skip the __sync_icache_dcache() call as,
 * like set_pte_at(), the PTE is never changed from no-exec to exec here.
 *
 * Returns whether or not the PTE actually changed.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pte_t *ptep,
                          pte_t entry, int dirty)
{
        pteval_t old_pteval, pteval;
        pte_t pte = READ_ONCE(*ptep);

        if (pte_same(pte, entry))
                return 0;

        /* only preserve the access flags and write permission */
        pte_val(entry) &= PTE_RDONLY | PTE_AF | PTE_WRITE | PTE_DIRTY;

        /*
         * Setting the flags must be done atomically to avoid racing with the
         * hardware update of the access/dirty state. The PTE_RDONLY bit must
         * be set to the most permissive (lowest value) of *ptep and entry
         * (calculated as: a & b == ~(~a | ~b)).
         */
        pte_val(entry) ^= PTE_RDONLY;
        pteval = pte_val(pte);
        do {
                old_pteval = pteval;
                pteval ^= PTE_RDONLY;
                pteval |= pte_val(entry);
                pteval ^= PTE_RDONLY;
                pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
        } while (pteval != old_pteval);

        /* Invalidate a stale read-only entry */
        if (dirty)
                flush_tlb_page(vma, address);
        return 1;
}

static bool is_el1_instruction_abort(unsigned long esr)
{
        return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
}

static bool is_el1_data_abort(unsigned long esr)
{
        return ESR_ELx_EC(esr) == ESR_ELx_EC_DABT_CUR;
}

static inline bool is_el1_permission_fault(unsigned long addr, unsigned long esr,
                                           struct pt_regs *regs)
{
        unsigned long fsc_type = esr & ESR_ELx_FSC_TYPE;

        if (!is_el1_data_abort(esr) && !is_el1_instruction_abort(esr))
                return false;

        if (fsc_type == ESR_ELx_FSC_PERM)
                return true;

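        /*
         * With software TTBR0 PAN, user accesses made while PSTATE.PAN is
         * set go through a reserved page table, so a blocked access reports
         * a translation fault rather than a permission fault.
         */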
        if (is_ttbr0_addr(addr) && system_uses_ttbr0_pan())
                return fsc_type == ESR_ELx_FSC_FAULT &&
                        (regs->pstate & PSR_PAN_BIT);

        return false;
}

static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr,
                                                        unsigned long esr,
                                                        struct pt_regs *regs)
{
        unsigned long flags;
        u64 par, dfsc;

        if (!is_el1_data_abort(esr) ||
            (esr & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT)
                return false;

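        /*
         * Re-walk the page tables with an AT S1E1R translation; PAR_EL1
         * then tells us whether the walk succeeded and, if not, which fault
         * it saw.
         */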
        local_irq_save(flags);
        asm volatile("at s1e1r, %0" :: "r" (addr));
        isb();
        par = read_sysreg_par();
        local_irq_restore(flags);

        /*
         * If we now have a valid translation, treat the translation fault as
         * spurious.
         */
        if (!(par & SYS_PAR_EL1_F))
                return true;

        /*
         * If we got a different type of fault from the AT instruction,
         * treat the translation fault as spurious.
         */
        dfsc = FIELD_GET(SYS_PAR_EL1_FST, par);
        return (dfsc & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT;
}

static void die_kernel_fault(const char *msg, unsigned long addr,
                             unsigned long esr, struct pt_regs *regs)
{
        bust_spinlocks(1);

        pr_alert("Unable to handle kernel %s at virtual address %016lx\n", msg,
                 addr);

        kasan_non_canonical_hook(addr);

        mem_abort_decode(esr);

        show_pte(addr);
        die("Oops", regs, esr);
        bust_spinlocks(0);
        make_task_dead(SIGKILL);
}

#ifdef CONFIG_KASAN_HW_TAGS
static void report_tag_fault(unsigned long addr, unsigned long esr,
                             struct pt_regs *regs)
{
        /*
         * SAS bits aren't set for all faults reported in EL1, so we can't
         * find out access size.
         */
        bool is_write = !!(esr & ESR_ELx_WNR);
        kasan_report(addr, 0, is_write, regs->pc);
}
#else
/* Tag faults aren't enabled without CONFIG_KASAN_HW_TAGS. */
static inline void report_tag_fault(unsigned long addr, unsigned long esr,
                                    struct pt_regs *regs) { }
#endif

static void do_tag_recovery(unsigned long addr, unsigned long esr,
                            struct pt_regs *regs)
{

        report_tag_fault(addr, esr, regs);

        /*
         * Disable MTE Tag Checking on the local CPU for the current EL.
         * It will be done lazily on the other CPUs when they will hit a
         * tag fault.
         */
        sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF_MASK,
                         SYS_FIELD_PREP_ENUM(SCTLR_EL1, TCF, NONE));
        isb();
}

static bool is_el1_mte_sync_tag_check_fault(unsigned long esr)
{
        unsigned long fsc = esr & ESR_ELx_FSC;

        if (!is_el1_data_abort(esr))
                return false;

        if (fsc == ESR_ELx_FSC_MTE)
                return true;

        return false;
}

static bool is_translation_fault(unsigned long esr)
{
        return (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_FAULT;
}

static void __do_kernel_fault(unsigned long addr, unsigned long esr,
                              struct pt_regs *regs)
{
        const char *msg;

        /*
         * Are we prepared to handle this kernel fault?
         * We are almost certainly not prepared to handle instruction faults.
         */
        if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
                return;

        if (WARN_RATELIMIT(is_spurious_el1_translation_fault(addr, esr, regs),
            "Ignoring spurious kernel translation fault at virtual address %016lx\n", addr))
                return;

        if (is_el1_mte_sync_tag_check_fault(esr)) {
                do_tag_recovery(addr, esr, regs);

                return;
        }

        if (is_el1_permission_fault(addr, esr, regs)) {
                if (esr & ESR_ELx_WNR)
                        msg = "write to read-only memory";
                else if (is_el1_instruction_abort(esr))
                        msg = "execute from non-executable memory";
                else
                        msg = "read from unreadable memory";
        } else if (addr < PAGE_SIZE) {
                msg = "NULL pointer dereference";
        } else {
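                /*
                 * Give KFENCE a chance to claim an out-of-bounds or
                 * use-after-free access before reporting a bad paging
                 * request.
                 */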
                if (is_translation_fault(esr) &&
                    kfence_handle_page_fault(addr, esr & ESR_ELx_WNR, regs))
                        return;

                msg = "paging request";
        }

        if (efi_runtime_fixup_exception(regs, msg))
                return;

        die_kernel_fault(msg, addr, esr, regs);
}

static void set_thread_esr(unsigned long address, unsigned long esr)
{
        current->thread.fault_address = address;

        /*
         * If the faulting address is in the kernel, we must sanitize the ESR.
         * From userspace's point of view, kernel-only mappings don't exist
         * at all, so we report them as level 0 translation faults.
         * (This is not quite the way that "no mapping there at all" behaves:
         * an alignment fault not caused by the memory type would take
         * precedence over translation fault for a real access to empty
         * space. Unfortunately we can't easily distinguish "alignment fault
         * not caused by memory type" from "alignment fault caused by memory
         * type", so we ignore this wrinkle and just return the translation
         * fault.)
         */
        if (!is_ttbr0_addr(current->thread.fault_address)) {
                switch (ESR_ELx_EC(esr)) {
                case ESR_ELx_EC_DABT_LOW:
                        /*
                         * These bits provide only information about the
                         * faulting instruction, which userspace knows already.
                         * We explicitly clear bits which are architecturally
                         * RES0 in case they are given meanings in future.
                         * We always report the ESR as if the fault was taken
                         * to EL1 and so ISV and the bits in ISS[23:14] are
                         * clear. (In fact it always will be a fault to EL1.)
                         */
                        esr &= ESR_ELx_EC_MASK | ESR_ELx_IL |
                                ESR_ELx_CM | ESR_ELx_WNR;
                        esr |= ESR_ELx_FSC_FAULT;
                        break;
                case ESR_ELx_EC_IABT_LOW:
                        /*
                         * Claim a level 0 translation fault.
                         * All other bits are architecturally RES0 for faults
                         * reported with that DFSC value, so we clear them.
                         */
                        esr &= ESR_ELx_EC_MASK | ESR_ELx_IL;
                        esr |= ESR_ELx_FSC_FAULT;
                        break;
                default:
                        /*
                         * This should never happen (entry.S only brings us
                         * into this code for insn and data aborts from a lower
                         * exception level). Fail safe by not providing an ESR
                         * context record at all.
                         */
                        WARN(1, "ESR 0x%lx is not DABT or IABT from EL0\n", esr);
                        esr = 0;
                        break;
                }
        }

        current->thread.fault_code = esr;
}

static void do_bad_area(unsigned long far, unsigned long esr,
                        struct pt_regs *regs)
{
        unsigned long addr = untagged_addr(far);

        /*
         * If we are in kernel mode at this point, we have no context to
         * handle this fault with.
         */
        if (user_mode(regs)) {
                const struct fault_info *inf = esr_to_fault_info(esr);

                set_thread_esr(addr, esr);
                arm64_force_sig_fault(inf->sig, inf->code, far, inf->name);
        } else {
                __do_kernel_fault(addr, esr, regs);
        }
}

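/* Fault results private to this file, returned by __do_page_fault() below. */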
#define VM_FAULT_BADMAP         ((__force vm_fault_t)0x010000)
#define VM_FAULT_BADACCESS      ((__force vm_fault_t)0x020000)

static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
                                  unsigned int mm_flags, unsigned long vm_flags,
                                  struct pt_regs *regs)
{
        struct vm_area_struct *vma = find_vma(mm, addr);

        if (unlikely(!vma))
                return VM_FAULT_BADMAP;

        /*
         * Ok, we have a good vm_area for this memory access, so we can handle
         * it.
         */
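        /* The address may lie below a stack VMA that is allowed to grow down. */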
        if (unlikely(vma->vm_start > addr)) {
                if (!(vma->vm_flags & VM_GROWSDOWN))
                        return VM_FAULT_BADMAP;
                if (expand_stack(vma, addr))
                        return VM_FAULT_BADMAP;
        }

        /*
         * Check that the permissions on the VMA allow for the fault which
         * occurred.
         */
        if (!(vma->vm_flags & vm_flags))
                return VM_FAULT_BADACCESS;
        return handle_mm_fault(vma, addr, mm_flags, regs);
}

static bool is_el0_instruction_abort(unsigned long esr)
{
        return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW;
}

/*
 * Note: not valid for EL1 DC IVAC, but we never use that such that it
 * should fault. EL0 cannot issue DC IVAC (undef).
 */
static bool is_write_abort(unsigned long esr)
{
        return (esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM);
}

static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
                                   struct pt_regs *regs)
{
        const struct fault_info *inf;
        struct mm_struct *mm = current->mm;
        vm_fault_t fault;
        unsigned long vm_flags;
        unsigned int mm_flags = FAULT_FLAG_DEFAULT;
        unsigned long addr = untagged_addr(far);
#ifdef CONFIG_PER_VMA_LOCK
        struct vm_area_struct *vma;
#endif

        if (kprobe_page_fault(regs, esr))
                return 0;

        /*
         * If we're in an interrupt or have no user context, we must not take
         * the fault.
         */
        if (faulthandler_disabled() || !mm)
                goto no_context;

        if (user_mode(regs))
                mm_flags |= FAULT_FLAG_USER;

        /*
         * vm_flags tells us what bits we must have in vma->vm_flags
         * for the fault to be benign, __do_page_fault() would check
         * vma->vm_flags & vm_flags and returns an error if the
         * intersection is empty
         */
        if (is_el0_instruction_abort(esr)) {
                /* It was exec fault */
                vm_flags = VM_EXEC;
                mm_flags |= FAULT_FLAG_INSTRUCTION;
        } else if (is_write_abort(esr)) {
                /* It was write fault */
                vm_flags = VM_WRITE;
                mm_flags |= FAULT_FLAG_WRITE;
        } else {
                /* It was read fault */
                vm_flags = VM_READ;
                /* Write implies read */
                vm_flags |= VM_WRITE;
                /* If EPAN is absent then exec implies read */
                if (!cpus_have_const_cap(ARM64_HAS_EPAN))
                        vm_flags |= VM_EXEC;
        }

        if (is_ttbr0_addr(addr) && is_el1_permission_fault(addr, esr, regs)) {
                if (is_el1_instruction_abort(esr))
                        die_kernel_fault("execution of user memory",
                                         addr, esr, regs);

                if (!search_exception_tables(regs->pc))
                        die_kernel_fault("access to user memory outside uaccess routines",
                                         addr, esr, regs);
        }

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);

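        /*
         * Try the lockless per-VMA path first to avoid mmap_lock contention;
         * fall back to taking mmap_lock if the VMA cannot be locked or the
         * fault must be retried.
         */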
#ifdef CONFIG_PER_VMA_LOCK
        if (!(mm_flags & FAULT_FLAG_USER))
                goto lock_mmap;

        vma = lock_vma_under_rcu(mm, addr);
        if (!vma)
                goto lock_mmap;

        if (!(vma->vm_flags & vm_flags)) {
                vma_end_read(vma);
                goto lock_mmap;
        }
        fault = handle_mm_fault(vma, addr & PAGE_MASK,
                                mm_flags | FAULT_FLAG_VMA_LOCK, regs);
        vma_end_read(vma);

        if (!(fault & VM_FAULT_RETRY)) {
                count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
                goto done;
        }
        count_vm_vma_lock_event(VMA_LOCK_RETRY);

        /* Quick path to respond to signals */
        if (fault_signal_pending(fault, regs)) {
                if (!user_mode(regs))
                        goto no_context;
                return 0;
        }
lock_mmap:
#endif /* CONFIG_PER_VMA_LOCK */
        /*
         * As per x86, we may deadlock here. However, since the kernel only
         * validly references user space from well defined areas of the code,
         * we can bug out early if this is from code which shouldn't.
         */
        if (!mmap_read_trylock(mm)) {
                if (!user_mode(regs) && !search_exception_tables(regs->pc))
                        goto no_context;
retry:
                mmap_read_lock(mm);
        } else {
                /*
                 * The above mmap_read_trylock() might have succeeded in which
                 * case, we'll have missed the might_sleep() from down_read().
                 */
                might_sleep();
#ifdef CONFIG_DEBUG_VM
                if (!user_mode(regs) && !search_exception_tables(regs->pc)) {
                        mmap_read_unlock(mm);
                        goto no_context;
                }
#endif
        }

        fault = __do_page_fault(mm, addr, mm_flags, vm_flags, regs);

        /* Quick path to respond to signals */
        if (fault_signal_pending(fault, regs)) {
                if (!user_mode(regs))
                        goto no_context;
                return 0;
        }

        /* The fault is fully completed (including releasing mmap lock) */
        if (fault & VM_FAULT_COMPLETED)
                return 0;

        if (fault & VM_FAULT_RETRY) {
                mm_flags |= FAULT_FLAG_TRIED;
                goto retry;
        }
        mmap_read_unlock(mm);

#ifdef CONFIG_PER_VMA_LOCK
done:
#endif
        /*
         * Handle the "normal" (no error) case first.
         */
        if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
                              VM_FAULT_BADACCESS))))
                return 0;

        /*
         * If we are in kernel mode at this point, we have no context to
         * handle this fault with.
         */
        if (!user_mode(regs))
                goto no_context;

        if (fault & VM_FAULT_OOM) {
                /*
                 * We ran out of memory, call the OOM killer, and return to
                 * userspace (which will retry the fault, or kill us if we got
                 * oom-killed).
                 */
                pagefault_out_of_memory();
                return 0;
        }

        inf = esr_to_fault_info(esr);
        set_thread_esr(addr, esr);
        if (fault & VM_FAULT_SIGBUS) {
                /*
                 * We had some memory, but were unable to successfully fix up
                 * this page fault.
                 */
                arm64_force_sig_fault(SIGBUS, BUS_ADRERR, far, inf->name);
        } else if (fault & (VM_FAULT_HWPOISON_LARGE | VM_FAULT_HWPOISON)) {
                unsigned int lsb;

                lsb = PAGE_SHIFT;
                if (fault & VM_FAULT_HWPOISON_LARGE)
                        lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));

                arm64_force_sig_mceerr(BUS_MCEERR_AR, far, lsb, inf->name);
        } else {
                /*
                 * Something tried to access memory that isn't in our memory
                 * map.
                 */
                arm64_force_sig_fault(SIGSEGV,
                                      fault == VM_FAULT_BADACCESS ? SEGV_ACCERR : SEGV_MAPERR,
                                      far, inf->name);
        }

        return 0;

no_context:
        __do_kernel_fault(addr, esr, regs);
        return 0;
}

static int __kprobes do_translation_fault(unsigned long far,
                                          unsigned long esr,
                                          struct pt_regs *regs)
{
        unsigned long addr = untagged_addr(far);

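        /*
         * Only translation faults on user (TTBR0) addresses can be resolved
         * by the page fault handler; anything else is a bad area.
         */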
        if (is_ttbr0_addr(addr))
                return do_page_fault(far, esr, regs);

        do_bad_area(far, esr, regs);
        return 0;
}

static int do_alignment_fault(unsigned long far, unsigned long esr,
                              struct pt_regs *regs)
{
        if (IS_ENABLED(CONFIG_COMPAT_ALIGNMENT_FIXUPS) &&
            compat_user_mode(regs))
                return do_compat_alignment_fixup(far, regs);
        do_bad_area(far, esr, regs);
        return 0;
}

static int do_bad(unsigned long far, unsigned long esr, struct pt_regs *regs)
{
        return 1; /* "fault" */
}

static int do_sea(unsigned long far, unsigned long esr, struct pt_regs *regs)
{
        const struct fault_info *inf;
        unsigned long siaddr;

        inf = esr_to_fault_info(esr);

        if (user_mode(regs) && apei_claim_sea(regs) == 0) {
                /*
                 * APEI claimed this as a firmware-first notification.
                 * Some processing deferred to task_work before ret_to_user().
                 */
                return 0;
        }

        if (esr & ESR_ELx_FnV) {
                siaddr = 0;
        } else {
                /*
                 * The architecture specifies that the tag bits of FAR_EL1 are
                 * UNKNOWN for synchronous external aborts. Mask them out now
                 * so that userspace doesn't see them.
                 */
                siaddr = untagged_addr(far);
        }
        arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr);

        return 0;
}

static int do_tag_check_fault(unsigned long far, unsigned long esr,
                              struct pt_regs *regs)
{
        /*
         * The architecture specifies that bits 63:60 of FAR_EL1 are UNKNOWN
         * for tag check faults. Set them to corresponding bits in the untagged
         * address.
         */
        far = (__untagged_addr(far) & ~MTE_TAG_MASK) | (far & MTE_TAG_MASK);
        do_bad_area(far, esr, regs);
        return 0;
}

static const struct fault_info fault_info[] = {
        { do_bad,               SIGKILL, SI_KERNEL,    "ttbr address size fault" },
        { do_bad,               SIGKILL, SI_KERNEL,    "level 1 address size fault" },
        { do_bad,               SIGKILL, SI_KERNEL,    "level 2 address size fault" },
        { do_bad,               SIGKILL, SI_KERNEL,    "level 3 address size fault" },
        { do_translation_fault, SIGSEGV, SEGV_MAPERR,  "level 0 translation fault" },
        { do_translation_fault, SIGSEGV, SEGV_MAPERR,  "level 1 translation fault" },
        { do_translation_fault, SIGSEGV, SEGV_MAPERR,  "level 2 translation fault" },
        { do_translation_fault, SIGSEGV, SEGV_MAPERR,  "level 3 translation fault" },
        { do_bad,               SIGKILL, SI_KERNEL,    "unknown 8" },
        { do_page_fault,        SIGSEGV, SEGV_ACCERR,  "level 1 access flag fault" },
        { do_page_fault,        SIGSEGV, SEGV_ACCERR,  "level 2 access flag fault" },
        { do_page_fault,        SIGSEGV, SEGV_ACCERR,  "level 3 access flag fault" },
        { do_bad,               SIGKILL, SI_KERNEL,    "unknown 12" },
        { do_page_fault,        SIGSEGV, SEGV_ACCERR,  "level 1 permission fault" },
        { do_page_fault,        SIGSEGV, SEGV_ACCERR,  "level 2 permission fault" },
        { do_page_fault,        SIGSEGV, SEGV_ACCERR,  "level 3 permission fault" },
        { do_sea,               SIGBUS,  BUS_OBJERR,   "synchronous external abort" },
        { do_tag_check_fault,   SIGSEGV, SEGV_MTESERR, "synchronous tag check fault" },
        { do_bad,               SIGKILL, SI_KERNEL,    "unknown 18" },
        { do_bad,               SIGKILL, SI_KERNEL,    "unknown 19" },
        { do_sea,               SIGKILL, SI_KERNEL,    "level 0 (translation table walk)" },
        { do_sea,               SIGKILL, SI_KERNEL,    "level 1 (translation table walk)" },
        { do_sea,               SIGKILL, SI_KERNEL,    "level 2 (translation table walk)" },
        { do_sea,               SIGKILL, SI_KERNEL,    "level 3 (translation table walk)" },
        { do_sea,               SIGBUS,  BUS_OBJERR,   "synchronous parity or ECC error" },     // Reserved when RAS is implemented
        { do_bad,               SIGKILL, SI_KERNEL,    "unknown 25" },
        { do_bad,               SIGKILL, SI_KERNEL,    "unknown 26" },
        { do_bad,               SIGKILL, SI_KERNEL,    "unknown 27" },
        { do_sea,               SIGKILL, SI_KERNEL,    "level 0 synchronous parity error (translation table walk)" },   // Reserved when RAS is implemented
        { do_sea,               SIGKILL, SI_KERNEL,    "level 1 synchronous parity error (translation table walk)" },   // Reserved when RAS is implemented
        { do_sea,               SIGKILL, SI_KERNEL,    "level 2 synchronous parity error (translation table walk)" },   // Reserved when RAS is implemented
        { do_sea,               SIGKILL, SI_KERNEL,    "level 3 synchronous parity error (translation table walk)" },   // Reserved when RAS is implemented
        { do_bad,               SIGKILL, SI_KERNEL,    "unknown 32" },
        { do_alignment_fault,   SIGBUS,  BUS_ADRALN,   "alignment fault" },
        { do_bad,               SIGKILL, SI_KERNEL,    "unknown 34" },
        { do_bad,               SIGKILL, SI_KERNEL,    "unknown 35" },
        { do_bad,               SIGKILL, SI_KERNEL,    "unknown 36" },
        { do_bad,               SIGKILL, SI_KERNEL,    "unknown 37" },
        { do_bad,               SIGKILL, SI_KERNEL,    "unknown 38" },
        { do_bad,               SIGKILL, SI_KERNEL,    "unknown 39" },
        { do_bad,               SIGKILL, SI_KERNEL,    "unknown 40" },
        { do_bad,               SIGKILL, SI_KERNEL,    "unknown 41" },
        { do_bad,               SIGKILL, SI_KERNEL,    "unknown 42" },
        { do_bad,               SIGKILL, SI_KERNEL,    "unknown 43" },
        { do_bad,               SIGKILL, SI_KERNEL,    "unknown 44" },
        { do_bad,               SIGKILL, SI_KERNEL,    "unknown 45" },
        { do_bad,               SIGKILL, SI_KERNEL,    "unknown 46" },
        { do_bad,               SIGKILL, SI_KERNEL,    "unknown 47" },
        { do_bad,               SIGKILL, SI_KERNEL,    "TLB conflict abort" },
        { do_bad,               SIGKILL, SI_KERNEL,    "Unsupported atomic hardware update fault" },
        { do_bad,               SIGKILL, SI_KERNEL,    "unknown 50" },
        { do_bad,               SIGKILL, SI_KERNEL,    "unknown 51" },
        { do_bad,               SIGKILL, SI_KERNEL,    "implementation fault (lockdown abort)" },
        { do_bad,               SIGBUS,  BUS_OBJERR,   "implementation fault (unsupported exclusive)" },
        { do_bad,               SIGKILL, SI_KERNEL,    "unknown 54" },
        { do_bad,               SIGKILL, SI_KERNEL,    "unknown 55" },
        { do_bad,               SIGKILL, SI_KERNEL,    "unknown 56" },
        { do_bad,               SIGKILL, SI_KERNEL,    "unknown 57" },
        { do_bad,               SIGKILL, SI_KERNEL,    "unknown 58" },
        { do_bad,               SIGKILL, SI_KERNEL,    "unknown 59" },
        { do_bad,               SIGKILL, SI_KERNEL,    "unknown 60" },
        { do_bad,               SIGKILL, SI_KERNEL,    "section domain fault" },
        { do_bad,               SIGKILL, SI_KERNEL,    "page domain fault" },
        { do_bad,               SIGKILL, SI_KERNEL,    "unknown 63" },
};

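/*
 * Common entry point for data and instruction aborts, called from the
 * exception entry code with the faulting address (FAR) and syndrome (ESR).
 */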
void do_mem_abort(unsigned long far, unsigned long esr, struct pt_regs *regs)
{
        const struct fault_info *inf = esr_to_fault_info(esr);
        unsigned long addr = untagged_addr(far);

        if (!inf->fn(far, esr, regs))
                return;

        if (!user_mode(regs))
                die_kernel_fault(inf->name, addr, esr, regs);

        /*
         * At this point we have an unrecognized fault type whose tag bits may
         * have been defined as UNKNOWN. Therefore we only expose the untagged
         * address to the signal handler.
         */
        arm64_notify_die(inf->name, regs, inf->sig, inf->code, addr, esr);
}
NOKPROBE_SYMBOL(do_mem_abort);

void do_sp_pc_abort(unsigned long addr, unsigned long esr, struct pt_regs *regs)
{
        arm64_notify_die("SP/PC alignment exception", regs, SIGBUS, BUS_ADRALN,
                         addr, esr);
}
NOKPROBE_SYMBOL(do_sp_pc_abort);

/*
 * __refdata because early_brk64 is __init, but the reference to it is
 * clobbered at arch_initcall time.
 * See traps.c and debug-monitors.c:debug_traps_init().
 */
static struct fault_info __refdata debug_fault_info[] = {
        { do_bad,      SIGTRAP, TRAP_HWBKPT, "hardware breakpoint" },
        { do_bad,      SIGTRAP, TRAP_HWBKPT, "hardware single-step" },
        { do_bad,      SIGTRAP, TRAP_HWBKPT, "hardware watchpoint" },
        { do_bad,      SIGKILL, SI_KERNEL,   "unknown 3" },
        { do_bad,      SIGTRAP, TRAP_BRKPT,  "aarch32 BKPT" },
        { do_bad,      SIGKILL, SI_KERNEL,   "aarch32 vector catch" },
        { early_brk64, SIGTRAP, TRAP_BRKPT,  "aarch64 BRK" },
        { do_bad,      SIGKILL, SI_KERNEL,   "unknown 7" },
};

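/*
 * Install a handler for one of the debug fault vectors above; used by the
 * hw_breakpoint and debug-monitors code at boot.
 */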
void __init hook_debug_fault_code(int nr,
                                  int (*fn)(unsigned long, unsigned long, struct pt_regs *),
                                  int sig, int code, const char *name)
{
        BUG_ON(nr < 0 || nr >= ARRAY_SIZE(debug_fault_info));

        debug_fault_info[nr].fn   = fn;
        debug_fault_info[nr].sig  = sig;
        debug_fault_info[nr].code = code;
        debug_fault_info[nr].name = name;
}

/*
 * In debug exception context, we explicitly disable preemption despite
 * having interrupts disabled.
 * This serves two purposes: it makes it much less likely that we would
 * accidentally schedule in exception context and it will force a warning
 * if we somehow manage to schedule by accident.
 */
static void debug_exception_enter(struct pt_regs *regs)
{
        preempt_disable();

        /* This code is a bit fragile.  Test it. */
        RCU_LOCKDEP_WARN(!rcu_is_watching(), "exception_enter didn't work");
}
NOKPROBE_SYMBOL(debug_exception_enter);

static void debug_exception_exit(struct pt_regs *regs)
{
        preempt_enable_no_resched();
}
NOKPROBE_SYMBOL(debug_exception_exit);

void do_debug_exception(unsigned long addr_if_watchpoint, unsigned long esr,
                        struct pt_regs *regs)
{
        const struct fault_info *inf = esr_to_debug_fault_info(esr);
        unsigned long pc = instruction_pointer(regs);

        debug_exception_enter(regs);

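        /*
         * A debug exception from EL0 with a PC in the kernel (TTBR1) range
         * suggests an attempt to attack the branch predictor, so apply
         * hardening before going any further.
         */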
        if (user_mode(regs) && !is_ttbr0_addr(pc))
                arm64_apply_bp_hardening();

        if (inf->fn(addr_if_watchpoint, esr, regs)) {
                arm64_notify_die(inf->name, regs, inf->sig, inf->code, pc, esr);
        }

        debug_exception_exit(regs);
}
NOKPROBE_SYMBOL(do_debug_exception);

/*
 * Used during anonymous page fault handling.
 */
struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma,
                                             unsigned long vaddr)
{
        gfp_t flags = GFP_HIGHUSER_MOVABLE | __GFP_ZERO;

        /*
         * If the page is mapped with PROT_MTE, initialise the tags at the
         * point of allocation and page zeroing as this is usually faster than
         * separate DC ZVA and STGM.
         */
        if (vma->vm_flags & VM_MTE)
                flags |= __GFP_ZEROTAGS;

        return vma_alloc_folio(flags, 0, vma, vaddr, false);
}

void tag_clear_highpage(struct page *page)
{
        /* Newly allocated page, shouldn't have been tagged yet */
        WARN_ON_ONCE(!try_page_mte_tagging(page));
        mte_zero_clear_page_tags(page_address(page));
        set_page_mte_tagged(page);
}
013bb59d | 994 | } |