// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/traps.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/bug.h>
#include <linux/context_tracking.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/sizes.h>
#include <linux/syscalls.h>
#include <linux/mm_types.h>
#include <linux/kasan.h>

#include <asm/atomic.h>
#include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/extable.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
#include <asm/exception.h>
#include <asm/system_misc.h>
#include <asm/sysreg.h>
static const char *handler[] = {
	"Synchronous Abort",
	"IRQ",
	"FIQ",
	"Error"
};

int show_unhandled_signals = 0;
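
/*
 * Note (added): dump_kernel_instr() below appends a "Code:" line to the
 * oops output, showing the four instruction words before the faulting PC
 * followed by the word at the PC itself in parentheses, e.g.
 * (illustrative values only):
 *
 *	Code: aa1e03e9 d503201f f9400260 d65f03c0 (d4210000)
 */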
static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
	int i;

	if (user_mode(regs))
		return;

	for (i = -4; i < 1; i++) {
		unsigned int val, bad;

		bad = aarch64_insn_read(&((u32 *)addr)[i], &val);

		if (!bad)
			p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
		else {
			p += sprintf(p, "bad PC value");
			break;
		}
	}

	printk("%sCode: %s\n", lvl, str);
}
#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#elif defined(CONFIG_PREEMPT_RT)
#define S_PREEMPT " PREEMPT_RT"
#else
#define S_PREEMPT ""
#endif

#define S_SMP " SMP"
static int __die(const char *str, int err, struct pt_regs *regs)
{
	static int die_counter;
	int ret;

	pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
		 str, err, ++die_counter);

	/* trap and error numbers are mostly meaningless on ARM */
	ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV);
	if (ret == NOTIFY_STOP)
		return ret;

	print_modules();
	show_regs(regs);

	dump_kernel_instr(KERN_EMERG, regs);

	return ret;
}
static DEFINE_RAW_SPINLOCK(die_lock);

/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
	int ret;
	unsigned long flags;

	raw_spin_lock_irqsave(&die_lock, flags);

	oops_enter();

	console_verbose();
	bust_spinlocks(1);
	ret = __die(str, err, regs);

	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	oops_exit();

	if (in_interrupt())
		panic("%s: Fatal exception in interrupt", str);
	if (panic_on_oops)
		panic("%s: Fatal exception", str);

	raw_spin_unlock_irqrestore(&die_lock, flags);

	if (ret != NOTIFY_STOP)
		do_exit(SIGSEGV);
}
static void arm64_show_signal(int signo, const char *str)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	struct task_struct *tsk = current;
	unsigned int esr = tsk->thread.fault_code;
	struct pt_regs *regs = task_pt_regs(tsk);

	/* Leave if the signal won't be shown */
	if (!show_unhandled_signals ||
	    !unhandled_signal(tsk, signo) ||
	    !__ratelimit(&rs))
		return;

	pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk));
	if (esr)
		pr_cont("%s, ESR 0x%08x, ", esr_get_class_string(esr), esr);

	pr_cont("%s", str);
	print_vma_addr(KERN_CONT " in ", regs->pc);
	pr_cont("\n");
	__show_regs(regs);
}
void arm64_force_sig_fault(int signo, int code, void __user *addr,
			   const char *str)
{
	arm64_show_signal(signo, str);
	if (signo == SIGKILL)
		force_sig(SIGKILL);
	else
		force_sig_fault(signo, code, addr);
}

void arm64_force_sig_mceerr(int code, void __user *addr, short lsb,
			    const char *str)
{
	arm64_show_signal(SIGBUS, str);
	force_sig_mceerr(code, addr, lsb);
}

void arm64_force_sig_ptrace_errno_trap(int errno, void __user *addr,
				       const char *str)
{
	arm64_show_signal(SIGTRAP, str);
	force_sig_ptrace_errno_trap(errno, addr);
}

void arm64_notify_die(const char *str, struct pt_regs *regs,
		      int signo, int sicode, void __user *addr,
		      int err)
{
	if (user_mode(regs)) {
		WARN_ON(regs != current_pt_regs());
		current->thread.fault_address = 0;
		current->thread.fault_code = err;

		arm64_force_sig_fault(signo, sicode, addr, str);
	} else {
		die(str, regs, err);
	}
}
#ifdef CONFIG_COMPAT
#define PSTATE_IT_1_0_SHIFT	25
#define PSTATE_IT_1_0_MASK	(0x3 << PSTATE_IT_1_0_SHIFT)
#define PSTATE_IT_7_2_SHIFT	10
#define PSTATE_IT_7_2_MASK	(0x3f << PSTATE_IT_7_2_SHIFT)

static u32 compat_get_it_state(struct pt_regs *regs)
{
	u32 it, pstate = regs->pstate;

	it  = (pstate & PSTATE_IT_1_0_MASK) >> PSTATE_IT_1_0_SHIFT;
	it |= ((pstate & PSTATE_IT_7_2_MASK) >> PSTATE_IT_7_2_SHIFT) << 2;

	return it;
}

static void compat_set_it_state(struct pt_regs *regs, u32 it)
{
	u32 pstate_it;

	pstate_it  = (it << PSTATE_IT_1_0_SHIFT) & PSTATE_IT_1_0_MASK;
	pstate_it |= ((it >> 2) << PSTATE_IT_7_2_SHIFT) & PSTATE_IT_7_2_MASK;

	regs->pstate &= ~PSR_AA32_IT_MASK;
	regs->pstate |= pstate_it;
}
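
/*
 * Note (added): in the AArch32 PSTATE image the 8-bit Thumb IT state is
 * split across two fields: IT[1:0] lives in pstate bits [26:25] and
 * IT[7:2] in bits [15:10]. The helpers above reassemble and re-split it
 * so that advance_itstate() below can treat it as a single byte, with
 * IT[7:5] naming the base condition and IT[4:0] acting as a shifting
 * mask that tracks the remaining instructions in the block.
 */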
static void advance_itstate(struct pt_regs *regs)
{
	u32 it;

	/* ARM mode */
	if (!(regs->pstate & PSR_AA32_T_BIT) ||
	    !(regs->pstate & PSR_AA32_IT_MASK))
		return;

	it = compat_get_it_state(regs);

	/*
	 * If this is the last instruction of the block, wipe the IT
	 * state. Otherwise advance it.
	 */
	if (!(it & 7))
		it = 0;
	else
		it = (it & 0xe0) | ((it << 1) & 0x1f);

	compat_set_it_state(regs, it);
}
#else
static void advance_itstate(struct pt_regs *regs)
{
}
#endif

void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
{
	regs->pc += size;

	/*
	 * If we were single stepping, we want to get the step exception after
	 * we return from the trap.
	 */
	if (user_mode(regs))
		user_fastforward_single_step(current);

	if (compat_user_mode(regs))
		advance_itstate(regs);
	else
		regs->pstate &= ~PSR_BTYPE_MASK;
}
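
/*
 * Note (added): clearing PSTATE.BTYPE above mirrors what retiring the
 * emulated instruction would have done in hardware: the following
 * AArch64 instruction is no longer the target of an indirect branch,
 * so no BTI check should apply to it.
 */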
static LIST_HEAD(undef_hook);
static DEFINE_RAW_SPINLOCK(undef_lock);

void register_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_add(&hook->node, &undef_hook);
	raw_spin_unlock_irqrestore(&undef_lock, flags);
}

void unregister_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_del(&hook->node);
	raw_spin_unlock_irqrestore(&undef_lock, flags);
}
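
/*
 * Example (added, hypothetical encoding): callers such as
 * armv8_deprecated.c register a hook to emulate one instruction
 * pattern, roughly like so:
 *
 *	static int my_emulate(struct pt_regs *regs, u32 instr)
 *	{
 *		// ...emulate the instruction, then step past it...
 *		arm64_skip_faulting_instruction(regs, 4);
 *		return 0;	// 0 == handled, nonzero == not handled
 *	}
 *
 *	static struct undef_hook my_hook = {
 *		.instr_mask	= 0xffffffff,
 *		.instr_val	= 0xdeadc0de,	// encoding to match
 *		.pstate_mask	= PSR_AA32_MODE_MASK,
 *		.pstate_val	= PSR_AA32_MODE_USR,
 *		.fn		= my_emulate,
 *	};
 *
 *	register_undef_hook(&my_hook);
 */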
static int call_undef_hook(struct pt_regs *regs)
{
	struct undef_hook *hook;
	unsigned long flags;
	u32 instr;
	int (*fn)(struct pt_regs *regs, u32 instr) = NULL;
	void __user *pc = (void __user *)instruction_pointer(regs);

	if (!user_mode(regs)) {
		__le32 instr_le;
		if (get_kernel_nofault(instr_le, (__force __le32 *)pc))
			goto exit;
		instr = le32_to_cpu(instr_le);
	} else if (compat_thumb_mode(regs)) {
		/* 16-bit Thumb instruction */
		__le16 instr_le;
		if (get_user(instr_le, (__le16 __user *)pc))
			goto exit;
		instr = le16_to_cpu(instr_le);
		if (aarch32_insn_is_wide(instr)) {
			u32 instr2;

			if (get_user(instr_le, (__le16 __user *)(pc + 2)))
				goto exit;
			instr2 = le16_to_cpu(instr_le);
			instr = (instr << 16) | instr2;
		}
	} else {
		/* 32-bit ARM instruction */
		__le32 instr_le;
		if (get_user(instr_le, (__le32 __user *)pc))
			goto exit;
		instr = le32_to_cpu(instr_le);
	}

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_for_each_entry(hook, &undef_hook, node)
		if ((instr & hook->instr_mask) == hook->instr_val &&
		    (regs->pstate & hook->pstate_mask) == hook->pstate_val)
			fn = hook->fn;
	raw_spin_unlock_irqrestore(&undef_lock, flags);
exit:
	return fn ? fn(regs, instr) : 1;
}
void force_signal_inject(int signal, int code, unsigned long address, unsigned int err)
{
	const char *desc;
	struct pt_regs *regs = current_pt_regs();

	if (WARN_ON(!user_mode(regs)))
		return;

	switch (signal) {
	case SIGILL:
		desc = "undefined instruction";
		break;
	case SIGSEGV:
		desc = "illegal memory access";
		break;
	default:
		desc = "unknown or unrecoverable error";
		break;
	}

	/* Force signals we don't understand to SIGKILL */
	if (WARN_ON(signal != SIGKILL &&
		    siginfo_layout(signal, code) != SIL_FAULT)) {
		signal = SIGKILL;
	}

	arm64_notify_die(desc, regs, signal, code, (void __user *)address, err);
}

/*
 * Set up process info to signal segmentation fault - called on access error.
 */
void arm64_notify_segfault(unsigned long addr)
{
	int code;

	mmap_read_lock(current->mm);
	if (find_vma(current->mm, addr) == NULL)
		code = SEGV_MAPERR;
	else
		code = SEGV_ACCERR;
	mmap_read_unlock(current->mm);

	force_signal_inject(SIGSEGV, code, addr, 0);
}
void do_undefinstr(struct pt_regs *regs)
{
	/* check for AArch32 breakpoint instructions */
	if (!aarch32_break_handler(regs))
		return;

	if (call_undef_hook(regs) == 0)
		return;

	BUG_ON(!user_mode(regs));
	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}
NOKPROBE_SYMBOL(do_undefinstr);

void do_bti(struct pt_regs *regs)
{
	BUG_ON(!user_mode(regs));
	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}
NOKPROBE_SYMBOL(do_bti);

void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr)
{
	/*
	 * Unexpected FPAC exception or pointer authentication failure in
	 * the kernel: kill the task before it does any more harm.
	 */
	BUG_ON(!user_mode(regs));
	force_signal_inject(SIGILL, ILL_ILLOPN, regs->pc, esr);
}
NOKPROBE_SYMBOL(do_ptrauth_fault);
#define __user_cache_maint(insn, address, res)			\
	if (address >= user_addr_max()) {			\
		res = -EFAULT;					\
	} else {						\
		uaccess_ttbr0_enable();				\
		asm volatile (					\
			"1:	" insn ", %1\n"			\
			"	mov	%w0, #0\n"		\
			"2:\n"					\
			"	.pushsection .fixup,\"ax\"\n"	\
			"	.align	2\n"			\
			"3:	mov	%w0, %w2\n"		\
			"	b	2b\n"			\
			"	.popsection\n"			\
			_ASM_EXTABLE(1b, 3b)			\
			: "=r" (res)				\
			: "r" (address), "i" (-EFAULT));	\
		uaccess_ttbr0_disable();			\
	}
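
/*
 * Note (added): the fixup above works as follows. If the cache
 * maintenance instruction at label 1 faults on the user address, the
 * exception-table entry created by _ASM_EXTABLE(1b, 3b) redirects
 * execution to label 3, which loads -EFAULT into the result register
 * and branches back to label 2; on the non-faulting path the
 * "mov %w0, #0" leaves res == 0.
 */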
static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
{
	unsigned long address;
	int rt = ESR_ELx_SYS64_ISS_RT(esr);
	int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
	int ret = 0;

	address = untagged_addr(pt_regs_read_reg(regs, rt));

	switch (crm) {
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAU:	/* DC CVAU, gets promoted */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAC:	/* DC CVAC, gets promoted */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVADP:	/* DC CVADP */
		__user_cache_maint("sys 3, c7, c13, 1", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CVAP:	/* DC CVAP */
		__user_cache_maint("sys 3, c7, c12, 1", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_DC_CIVAC:	/* DC CIVAC */
		__user_cache_maint("dc civac", address, ret);
		break;
	case ESR_ELx_SYS64_ISS_CRM_IC_IVAU:	/* IC IVAU */
		__user_cache_maint("ic ivau", address, ret);
		break;
	default:
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
		return;
	}

	if (ret)
		arm64_notify_segfault(address);
	else
		arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
{
	int rt = ESR_ELx_SYS64_ISS_RT(esr);
	unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);

	if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
		/* Hide DIC so that we can trap the unnecessary maintenance...*/
		val &= ~BIT(CTR_DIC_SHIFT);

		/* ... and fake IminLine to reduce the number of traps. */
		val &= ~CTR_IMINLINE_MASK;
		val |= (PAGE_SHIFT - 2) & CTR_IMINLINE_MASK;
	}

	pt_regs_write_reg(regs, rt, val);

	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
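
/*
 * Note (added): CTR_EL0.IminLine is Log2 of the number of 4-byte words
 * in the smallest instruction cache line, so (PAGE_SHIFT - 2) advertises
 * a page-sized line. Userspace maintenance loops stride by the reported
 * line size, so a larger fake line means one trapped DC/IC instruction
 * per page rather than one per real cache line.
 */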
static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
{
	int rt = ESR_ELx_SYS64_ISS_RT(esr);

	pt_regs_write_reg(regs, rt, arch_timer_read_counter());
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
{
	int rt = ESR_ELx_SYS64_ISS_RT(esr);

	pt_regs_write_reg(regs, rt, arch_timer_get_rate());
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void mrs_handler(unsigned int esr, struct pt_regs *regs)
{
	u32 sysreg, rt;

	rt = ESR_ELx_SYS64_ISS_RT(esr);
	sysreg = esr_sys64_to_sysreg(esr);

	if (do_emulate_mrs(regs, sysreg, rt) != 0)
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
}

static void wfi_handler(unsigned int esr, struct pt_regs *regs)
{
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}
struct sys64_hook {
	unsigned int esr_mask;
	unsigned int esr_val;
	void (*handler)(unsigned int esr, struct pt_regs *regs);
};

static const struct sys64_hook sys64_hooks[] = {
	{
		.esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL,
		.handler = user_cache_maint_handler,
	},
	{
		/* Trap read access to CTR_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CTR_READ,
		.handler = ctr_read_handler,
	},
	{
		/* Trap read access to CNTVCT_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCT,
		.handler = cntvct_read_handler,
	},
	{
		/* Trap read access to CNTFRQ_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CNTFRQ,
		.handler = cntfrq_read_handler,
	},
	{
		/* Trap read access to CPUID registers */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_MRS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_MRS_OP_VAL,
		.handler = mrs_handler,
	},
	{
		/* Trap WFI instructions executed in userspace */
		.esr_mask = ESR_ELx_WFx_MASK,
		.esr_val = ESR_ELx_WFx_WFI_VAL,
		.handler = wfi_handler,
	},
	{},
};
#ifdef CONFIG_COMPAT
static bool cp15_cond_valid(unsigned int esr, struct pt_regs *regs)
{
	int cond;

	/* Only a T32 instruction can trap without CV being set */
	if (!(esr & ESR_ELx_CV)) {
		u32 it;

		it = compat_get_it_state(regs);
		if (!it)
			return true;

		/* extract the condition from the IT state */
		cond = it >> 4;
	} else {
		cond = (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
	}

	return aarch32_opcode_cond_checks[cond](regs->pstate);
}
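
/*
 * Note (added): when ESR_ELx.CV is clear the condition field in the ESR
 * is invalid, which can only happen for a trapped T32 instruction. In
 * that case the condition is recovered from the IT state instead:
 * IT[7:4] holds the 4-bit condition for the current instruction in the
 * block, hence the "it >> 4" above.
 */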
static void compat_cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
{
	int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT;

	pt_regs_write_reg(regs, reg, arch_timer_get_rate());
	arm64_skip_faulting_instruction(regs, 4);
}

static const struct sys64_hook cp15_32_hooks[] = {
	{
		.esr_mask = ESR_ELx_CP15_32_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_32_ISS_SYS_CNTFRQ,
		.handler = compat_cntfrq_read_handler,
	},
	{},
};

static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
{
	int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT;
	int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT;
	u64 val = arch_timer_read_counter();

	pt_regs_write_reg(regs, rt, lower_32_bits(val));
	pt_regs_write_reg(regs, rt2, upper_32_bits(val));
	arm64_skip_faulting_instruction(regs, 4);
}

static const struct sys64_hook cp15_64_hooks[] = {
	{
		.esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
		.esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCT,
		.handler = compat_cntvct_read_handler,
	},
	{},
};
void do_cp15instr(unsigned int esr, struct pt_regs *regs)
{
	const struct sys64_hook *hook, *hook_base;

	if (!cp15_cond_valid(esr, regs)) {
		/*
		 * There is no T16 variant of a CP access, so we
		 * always advance PC by 4 bytes.
		 */
		arm64_skip_faulting_instruction(regs, 4);
		return;
	}

	switch (ESR_ELx_EC(esr)) {
	case ESR_ELx_EC_CP15_32:
		hook_base = cp15_32_hooks;
		break;
	case ESR_ELx_EC_CP15_64:
		hook_base = cp15_64_hooks;
		break;
	default:
		do_undefinstr(regs);
		return;
	}

	for (hook = hook_base; hook->handler; hook++)
		if ((hook->esr_mask & esr) == hook->esr_val) {
			hook->handler(esr, regs);
			return;
		}

	/*
	 * New cp15 instructions may previously have been undefined at
	 * EL0. Fall back to our usual undefined instruction handler
	 * so that we handle these consistently.
	 */
	do_undefinstr(regs);
}
NOKPROBE_SYMBOL(do_cp15instr);
#endif
void do_sysinstr(unsigned int esr, struct pt_regs *regs)
{
	const struct sys64_hook *hook;

	for (hook = sys64_hooks; hook->handler; hook++)
		if ((hook->esr_mask & esr) == hook->esr_val) {
			hook->handler(esr, regs);
			return;
		}

	/*
	 * New SYS instructions may previously have been undefined at EL0. Fall
	 * back to our usual undefined instruction handler so that we handle
	 * these consistently.
	 */
	do_undefinstr(regs);
}
NOKPROBE_SYMBOL(do_sysinstr);
static const char *esr_class_str[] = {
	[0 ... ESR_ELx_EC_MAX]		= "UNRECOGNIZED EC",
	[ESR_ELx_EC_UNKNOWN]		= "Unknown/Uncategorized",
	[ESR_ELx_EC_WFx]		= "WFI/WFE",
	[ESR_ELx_EC_CP15_32]		= "CP15 MCR/MRC",
	[ESR_ELx_EC_CP15_64]		= "CP15 MCRR/MRRC",
	[ESR_ELx_EC_CP14_MR]		= "CP14 MCR/MRC",
	[ESR_ELx_EC_CP14_LS]		= "CP14 LDC/STC",
	[ESR_ELx_EC_FP_ASIMD]		= "ASIMD",
	[ESR_ELx_EC_CP10_ID]		= "CP10 MRC/VMRS",
	[ESR_ELx_EC_PAC]		= "PAC",
	[ESR_ELx_EC_CP14_64]		= "CP14 MCRR/MRRC",
	[ESR_ELx_EC_BTI]		= "BTI",
	[ESR_ELx_EC_ILL]		= "PSTATE.IL",
	[ESR_ELx_EC_SVC32]		= "SVC (AArch32)",
	[ESR_ELx_EC_HVC32]		= "HVC (AArch32)",
	[ESR_ELx_EC_SMC32]		= "SMC (AArch32)",
	[ESR_ELx_EC_SVC64]		= "SVC (AArch64)",
	[ESR_ELx_EC_HVC64]		= "HVC (AArch64)",
	[ESR_ELx_EC_SMC64]		= "SMC (AArch64)",
	[ESR_ELx_EC_SYS64]		= "MSR/MRS (AArch64)",
	[ESR_ELx_EC_SVE]		= "SVE",
	[ESR_ELx_EC_ERET]		= "ERET/ERETAA/ERETAB",
	[ESR_ELx_EC_FPAC]		= "FPAC",
	[ESR_ELx_EC_IMP_DEF]		= "EL3 IMP DEF",
	[ESR_ELx_EC_IABT_LOW]		= "IABT (lower EL)",
	[ESR_ELx_EC_IABT_CUR]		= "IABT (current EL)",
	[ESR_ELx_EC_PC_ALIGN]		= "PC Alignment",
	[ESR_ELx_EC_DABT_LOW]		= "DABT (lower EL)",
	[ESR_ELx_EC_DABT_CUR]		= "DABT (current EL)",
	[ESR_ELx_EC_SP_ALIGN]		= "SP Alignment",
	[ESR_ELx_EC_FP_EXC32]		= "FP (AArch32)",
	[ESR_ELx_EC_FP_EXC64]		= "FP (AArch64)",
	[ESR_ELx_EC_SERROR]		= "SError",
	[ESR_ELx_EC_BREAKPT_LOW]	= "Breakpoint (lower EL)",
	[ESR_ELx_EC_BREAKPT_CUR]	= "Breakpoint (current EL)",
	[ESR_ELx_EC_SOFTSTP_LOW]	= "Software Step (lower EL)",
	[ESR_ELx_EC_SOFTSTP_CUR]	= "Software Step (current EL)",
	[ESR_ELx_EC_WATCHPT_LOW]	= "Watchpoint (lower EL)",
	[ESR_ELx_EC_WATCHPT_CUR]	= "Watchpoint (current EL)",
	[ESR_ELx_EC_BKPT32]		= "BKPT (AArch32)",
	[ESR_ELx_EC_VECTOR32]		= "Vector catch (AArch32)",
	[ESR_ELx_EC_BRK64]		= "BRK (AArch64)",
};

const char *esr_get_class_string(u32 esr)
{
	return esr_class_str[ESR_ELx_EC(esr)];
}
/*
 * bad_mode handles the impossible case in the exception vector. This is always
 * fatal.
 */
asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
{
	console_verbose();

	pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
		handler[reason], smp_processor_id(), esr,
		esr_get_class_string(esr));

	__show_regs(regs);
	local_daif_mask();
	panic("bad mode");
}
/*
 * bad_el0_sync handles unexpected, but potentially recoverable synchronous
 * exceptions taken from EL0. Unlike bad_mode, this returns.
 */
void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
{
	void __user *pc = (void __user *)instruction_pointer(regs);

	current->thread.fault_address = 0;
	current->thread.fault_code = esr;

	arm64_force_sig_fault(SIGILL, ILL_ILLOPC, pc,
			      "Bad EL0 synchronous exception");
}
#ifdef CONFIG_VMAP_STACK

DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
	__aligned(16);

asmlinkage void handle_bad_stack(struct pt_regs *regs)
{
	unsigned long tsk_stk = (unsigned long)current->stack;
	unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
	unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
	unsigned int esr = read_sysreg(esr_el1);
	unsigned long far = read_sysreg(far_el1);

	console_verbose();
	pr_emerg("Insufficient stack space to handle exception!");

	pr_emerg("ESR: 0x%08x -- %s\n", esr, esr_get_class_string(esr));
	pr_emerg("FAR: 0x%016lx\n", far);

	pr_emerg("Task stack:     [0x%016lx..0x%016lx]\n",
		 tsk_stk, tsk_stk + THREAD_SIZE);
	pr_emerg("IRQ stack:      [0x%016lx..0x%016lx]\n",
		 irq_stk, irq_stk + IRQ_STACK_SIZE);
	pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
		 ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);

	__show_regs(regs);

	/*
	 * We use nmi_panic to limit the potential for recursive overflows, and
	 * to get a better stack trace.
	 */
	nmi_panic(NULL, "kernel stack overflow");
	cpu_park_loop();
}
#endif
void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr)
{
	console_verbose();

	pr_crit("SError Interrupt on CPU%d, code 0x%08x -- %s\n",
		smp_processor_id(), esr, esr_get_class_string(esr));
	if (regs)
		__show_regs(regs);

	nmi_panic(regs, "Asynchronous SError Interrupt");

	cpu_park_loop();
	unreachable();
}
bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
{
	u32 aet = arm64_ras_serror_get_severity(esr);

	switch (aet) {
	case ESR_ELx_AET_CE:	/* corrected error */
	case ESR_ELx_AET_UEO:	/* restartable, not yet consumed */
		/*
		 * The CPU can make progress. We may take UEO again as
		 * a more severe error.
		 */
		return false;

	case ESR_ELx_AET_UEU:	/* Uncorrected Unrecoverable */
	case ESR_ELx_AET_UER:	/* Uncorrected Recoverable */
		/*
		 * The CPU can't make progress. The exception may have
		 * been imprecise.
		 *
		 * Neoverse-N1 #1349291 means a non-KVM SError reported as
		 * Unrecoverable should be treated as Uncontainable. We
		 * call arm64_serror_panic() in both cases.
		 */
		return true;

	case ESR_ELx_AET_UC:	/* Uncontainable or Uncategorized error */
	default:
		/* Error has been silently propagated */
		arm64_serror_panic(regs, esr);
	}
}
asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr)
{
	nmi_enter();

	/* non-RAS errors are not containable */
	if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
		arm64_serror_panic(regs, esr);

	nmi_exit();
}

asmlinkage void enter_from_user_mode(void)
{
	CT_WARN_ON(ct_state() != CONTEXT_USER);
	user_exit_irqoff();
}
NOKPROBE_SYMBOL(enter_from_user_mode);
/* GENERIC_BUG traps */

int is_valid_bugaddr(unsigned long addr)
{
	/*
	 * bug_handler() is only called for BRK #BUG_BRK_IMM, so the
	 * answer is trivial -- any spurious instances with no
	 * bug table entry will be rejected by report_bug() and passed
	 * back to the debug-monitors code and handled as a fatal
	 * unexpected debug exception.
	 */
	return 1;
}
static int bug_handler(struct pt_regs *regs, unsigned int esr)
{
	switch (report_bug(regs->pc, regs)) {
	case BUG_TRAP_TYPE_BUG:
		die("Oops - BUG", regs, 0);
		break;

	case BUG_TRAP_TYPE_WARN:
		break;

	default:
		/* unknown/unrecognised bug trap type */
		return DBG_HOOK_ERROR;
	}

	/* If thread survives, skip over the BUG instruction and continue: */
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	return DBG_HOOK_HANDLED;
}

static struct break_hook bug_break_hook = {
	.fn = bug_handler,
	.imm = BUG_BRK_IMM,
};
static int reserved_fault_handler(struct pt_regs *regs, unsigned int esr)
{
	pr_err("%s generated an invalid instruction at %pS!\n",
		in_bpf_jit(regs) ? "BPF JIT" : "Kernel text patching",
		(void *)instruction_pointer(regs));

	/* We cannot handle this */
	return DBG_HOOK_ERROR;
}

static struct break_hook fault_break_hook = {
	.fn = reserved_fault_handler,
	.imm = FAULT_BRK_IMM,
};
#ifdef CONFIG_KASAN_SW_TAGS

#define KASAN_ESR_RECOVER	0x20
#define KASAN_ESR_WRITE		0x10
#define KASAN_ESR_SIZE_MASK	0x0f
#define KASAN_ESR_SIZE(esr)	(1 << ((esr) & KASAN_ESR_SIZE_MASK))
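
/*
 * Note (added): with software tag-based KASAN the compiler emits a BRK
 * instruction whose immediate encodes the failed access: bit 5 (0x20)
 * is set for the recoverable variant, bit 4 (0x10) for writes, and
 * bits 3:0 give log2 of the access size, hence the 1 << (esr & 0xf)
 * above. kasan_handler() decodes these bits from the BRK64 ESR comment
 * field.
 */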
static int kasan_handler(struct pt_regs *regs, unsigned int esr)
{
	bool recover = esr & KASAN_ESR_RECOVER;
	bool write = esr & KASAN_ESR_WRITE;
	size_t size = KASAN_ESR_SIZE(esr);
	u64 addr = regs->regs[0];
	u64 pc = regs->pc;

	kasan_report(addr, size, write, pc);

	/*
	 * The instrumentation lets us control whether we can proceed after
	 * a crash was detected. This is done by passing the -recover flag to
	 * the compiler. Disabling recovery allows more compact code to be
	 * generated.
	 *
	 * Unfortunately disabling recovery doesn't work for the kernel right
	 * now. KASAN reporting is disabled in some contexts (for example when
	 * the allocator accesses slab object metadata; this is controlled by
	 * current->kasan_depth). All these accesses are detected by the tool,
	 * even though the reports for them are not printed.
	 *
	 * This is something that might be fixed at some point in the future.
	 */
	if (!recover)
		die("Oops - KASAN", regs, 0);

	/* If thread survives, skip over the brk instruction and continue: */
	arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
	return DBG_HOOK_HANDLED;
}

static struct break_hook kasan_break_hook = {
	.fn	= kasan_handler,
	.imm	= KASAN_BRK_IMM,
	.mask	= KASAN_BRK_MASK,
};
#endif
/*
 * Initial handler for AArch64 BRK exceptions.
 * This handler is only used until debug_traps_init().
 */
int __init early_brk64(unsigned long addr, unsigned int esr,
		       struct pt_regs *regs)
{
#ifdef CONFIG_KASAN_SW_TAGS
	unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;

	if ((comment & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
		return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
	return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
}

void __init trap_init(void)
{
	register_kernel_break_hook(&bug_break_hook);
	register_kernel_break_hook(&fault_break_hook);
#ifdef CONFIG_KASAN_SW_TAGS
	register_kernel_break_hook(&kasan_break_hook);
#endif
	debug_traps_init();
}