/*
 * Based on arch/arm/kernel/traps.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bug.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/sizes.h>
#include <linux/syscalls.h>
#include <linux/mm_types.h>
#include <linux/kasan.h>

#include <asm/atomic.h>
#include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/insn.h>
#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
#include <asm/exception.h>
#include <asm/system_misc.h>
#include <asm/sysreg.h>

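/*
 * Human-readable names for the four exception vector types, indexed by the
 * 'reason' code passed down from the exception entry code.
 */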
static const char *handler[] = {
        "Synchronous Abort",
        "IRQ",
        "FIQ",
        "Error"
};

int show_unhandled_signals = 0;

static void dump_backtrace_entry(unsigned long where)
{
        printk(" %pS\n", (void *)where);
}

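/*
 * Dump the code around the faulting PC: the four words before it and the
 * word at the PC itself, which is printed in parentheses.
 */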
static void __dump_instr(const char *lvl, struct pt_regs *regs)
{
        unsigned long addr = instruction_pointer(regs);
        char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
        int i;

        for (i = -4; i < 1; i++) {
                unsigned int val, bad;

                bad = get_user(val, &((u32 *)addr)[i]);

                if (!bad)
                        p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
                else {
                        p += sprintf(p, "bad PC value");
                        break;
                }
        }
        printk("%sCode: %s\n", lvl, str);
}

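/*
 * For kernel-mode faults, temporarily lift the user address limit so that
 * the get_user() calls in __dump_instr() may read kernel addresses.
 */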
static void dump_instr(const char *lvl, struct pt_regs *regs)
{
        if (!user_mode(regs)) {
                mm_segment_t fs = get_fs();
                set_fs(KERNEL_DS);
                __dump_instr(lvl, regs);
                set_fs(fs);
        } else {
                __dump_instr(lvl, regs);
        }
}

void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
        struct stackframe frame;
        int skip = 0;

        pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

        if (regs) {
                if (user_mode(regs))
                        return;
                skip = 1;
        }

        if (!tsk)
                tsk = current;

        if (!try_get_task_stack(tsk))
                return;

        if (tsk == current) {
                frame.fp = (unsigned long)__builtin_frame_address(0);
                frame.pc = (unsigned long)dump_backtrace;
        } else {
                /*
                 * task blocked in __switch_to
                 */
                frame.fp = thread_saved_fp(tsk);
                frame.pc = thread_saved_pc(tsk);
        }
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        frame.graph = 0;
#endif

        printk("Call trace:\n");
        do {
                /* skip until specified stack frame */
                if (!skip) {
                        dump_backtrace_entry(frame.pc);
                } else if (frame.fp == regs->regs[29]) {
                        skip = 0;
                        /*
                         * This is typically the case when this function is
                         * called from panic/abort. Since the exception
                         * handler's stack frame does not contain the PC at
                         * which the exception was taken, use regs->pc
                         * instead.
                         */
                        dump_backtrace_entry(regs->pc);
                }
        } while (!unwind_frame(tsk, &frame));

        put_task_stack(tsk);
}

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
        dump_backtrace(NULL, tsk);
        barrier();
}

#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#else
#define S_PREEMPT ""
#endif
#define S_SMP " SMP"

static int __die(const char *str, int err, struct pt_regs *regs)
{
        static int die_counter;
        int ret;

        pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
                 str, err, ++die_counter);

        /* trap and error numbers are mostly meaningless on ARM */
        ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV);
        if (ret == NOTIFY_STOP)
                return ret;

        print_modules();
        show_regs(regs);

        if (!user_mode(regs))
                dump_instr(KERN_EMERG, regs);

        return ret;
}

static DEFINE_RAW_SPINLOCK(die_lock);

/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
        int ret;
        unsigned long flags;

        raw_spin_lock_irqsave(&die_lock, flags);

        oops_enter();

        console_verbose();
        bust_spinlocks(1);
        ret = __die(str, err, regs);

        if (regs && kexec_should_crash(current))
                crash_kexec(regs);

        bust_spinlocks(0);
        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
        oops_exit();

        if (in_interrupt())
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception");

        raw_spin_unlock_irqrestore(&die_lock, flags);

        if (ret != NOTIFY_STOP)
                do_exit(SIGSEGV);
}

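/*
 * Print a rate-limited diagnostic for an unhandled signal: the offending
 * task, the saved ESR value, the VMA backing the faulting PC and a
 * register dump.
 */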
static void arm64_show_signal(int signo, const char *str)
{
        static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
                                      DEFAULT_RATELIMIT_BURST);
        struct task_struct *tsk = current;
        unsigned int esr = tsk->thread.fault_code;
        struct pt_regs *regs = task_pt_regs(tsk);

        /* Leave if the signal won't be shown */
        if (!show_unhandled_signals ||
            !unhandled_signal(tsk, signo) ||
            !__ratelimit(&rs))
                return;

        pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk));
        if (esr)
                pr_cont("%s, ESR 0x%08x, ", esr_get_class_string(esr), esr);

        pr_cont("%s", str);
        print_vma_addr(KERN_CONT " in ", regs->pc);
        pr_cont("\n");
        __show_regs(regs);
}

void arm64_force_sig_fault(int signo, int code, void __user *addr,
                           const char *str)
{
        arm64_show_signal(signo, str);
        if (signo == SIGKILL)
                force_sig(SIGKILL, current);
        else
                force_sig_fault(signo, code, addr, current);
}

void arm64_force_sig_mceerr(int code, void __user *addr, short lsb,
                            const char *str)
{
        arm64_show_signal(SIGBUS, str);
        force_sig_mceerr(code, addr, lsb, current);
}

void arm64_force_sig_ptrace_errno_trap(int errno, void __user *addr,
                                       const char *str)
{
        arm64_show_signal(SIGTRAP, str);
        force_sig_ptrace_errno_trap(errno, addr);
}

void arm64_notify_die(const char *str, struct pt_regs *regs,
                      int signo, int sicode, void __user *addr,
                      int err)
{
        if (user_mode(regs)) {
                WARN_ON(regs != current_pt_regs());
                current->thread.fault_address = 0;
                current->thread.fault_code = err;

                arm64_force_sig_fault(signo, sicode, addr, str);
        } else {
                die(str, regs, err);
        }
}

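/*
 * Step the PC past an instruction that was emulated or deliberately
 * skipped.
 */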
void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
{
        regs->pc += size;

        /*
         * If we were single stepping, we want to get the step exception after
         * we return from the trap.
         */
        if (user_mode(regs))
                user_fastforward_single_step(current);
}

static LIST_HEAD(undef_hook);
static DEFINE_RAW_SPINLOCK(undef_lock);

void register_undef_hook(struct undef_hook *hook)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_add(&hook->node, &undef_hook);
        raw_spin_unlock_irqrestore(&undef_lock, flags);
}

void unregister_undef_hook(struct undef_hook *hook)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_del(&hook->node);
        raw_spin_unlock_irqrestore(&undef_lock, flags);
}

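/*
 * Illustrative sketch only -- the mask/value encodings below are made up.
 * An instruction emulation driver registers a hook roughly like this, and
 * call_undef_hook() below invokes it when a matching instruction traps:
 *
 *	static int my_emulate(struct pt_regs *regs, u32 instr)
 *	{
 *		... decode instr, emulate it, advance regs->pc ...
 *		return 0;	(0 means handled)
 *	}
 *
 *	static struct undef_hook my_hook = {
 *		.instr_mask	= 0xffff0000,
 *		.instr_val	= 0x12340000,
 *		.pstate_mask	= PSR_MODE_MASK,
 *		.pstate_val	= PSR_MODE_EL0t,
 *		.fn		= my_emulate,
 *	};
 *
 *	register_undef_hook(&my_hook);
 */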
static int call_undef_hook(struct pt_regs *regs)
{
        struct undef_hook *hook;
        unsigned long flags;
        u32 instr;
        int (*fn)(struct pt_regs *regs, u32 instr) = NULL;
        void __user *pc = (void __user *)instruction_pointer(regs);

        if (!user_mode(regs)) {
                __le32 instr_le;
                if (probe_kernel_address((__force __le32 *)pc, instr_le))
                        goto exit;
                instr = le32_to_cpu(instr_le);
        } else if (compat_thumb_mode(regs)) {
                /* 16-bit Thumb instruction */
                __le16 instr_le;
                if (get_user(instr_le, (__le16 __user *)pc))
                        goto exit;
                instr = le16_to_cpu(instr_le);
                if (aarch32_insn_is_wide(instr)) {
                        u32 instr2;

                        if (get_user(instr_le, (__le16 __user *)(pc + 2)))
                                goto exit;
                        instr2 = le16_to_cpu(instr_le);
                        instr = (instr << 16) | instr2;
                }
        } else {
                /* 32-bit ARM instruction */
                __le32 instr_le;
                if (get_user(instr_le, (__le32 __user *)pc))
                        goto exit;
                instr = le32_to_cpu(instr_le);
        }

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_for_each_entry(hook, &undef_hook, node)
                if ((instr & hook->instr_mask) == hook->instr_val &&
                        (regs->pstate & hook->pstate_mask) == hook->pstate_val)
                        fn = hook->fn;

        raw_spin_unlock_irqrestore(&undef_lock, flags);
exit:
        return fn ? fn(regs, instr) : 1;
}

void force_signal_inject(int signal, int code, unsigned long address)
{
        const char *desc;
        struct pt_regs *regs = current_pt_regs();

        if (WARN_ON(!user_mode(regs)))
                return;

        switch (signal) {
        case SIGILL:
                desc = "undefined instruction";
                break;
        case SIGSEGV:
                desc = "illegal memory access";
                break;
        default:
                desc = "unknown or unrecoverable error";
                break;
        }

        /* Force signals we don't understand to SIGKILL */
        if (WARN_ON(signal != SIGKILL &&
                    siginfo_layout(signal, code) != SIL_FAULT)) {
                signal = SIGKILL;
        }

        arm64_notify_die(desc, regs, signal, code, (void __user *)address, 0);
}

/*
 * Set up process info to signal segmentation fault - called on access error.
 */
void arm64_notify_segfault(unsigned long addr)
{
        int code;

        down_read(&current->mm->mmap_sem);
        if (find_vma(current->mm, addr) == NULL)
                code = SEGV_MAPERR;
        else
                code = SEGV_ACCERR;
        up_read(&current->mm->mmap_sem);

        force_signal_inject(SIGSEGV, code, addr);
}

asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
{
        /* check for AArch32 breakpoint instructions */
        if (!aarch32_break_handler(regs))
                return;

        if (call_undef_hook(regs) == 0)
                return;

        BUG_ON(!user_mode(regs));
        force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
}

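/*
 * Perform one cache maintenance instruction on a user address. A fixup
 * entry routes any fault on the access to label 3, which stores -EFAULT
 * in 'res' instead of taking the kernel down.
 */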
#define __user_cache_maint(insn, address, res)                  \
        if (address >= user_addr_max()) {                       \
                res = -EFAULT;                                  \
        } else {                                                \
                uaccess_ttbr0_enable();                         \
                asm volatile (                                  \
                        "1:     " insn ", %1\n"                 \
                        "       mov     %w0, #0\n"              \
                        "2:\n"                                  \
                        "       .pushsection .fixup,\"ax\"\n"   \
                        "       .align  2\n"                    \
                        "3:     mov     %w0, %w2\n"             \
                        "       b       2b\n"                   \
                        "       .popsection\n"                  \
                        _ASM_EXTABLE(1b, 3b)                    \
                        : "=r" (res)                            \
                        : "r" (address), "i" (-EFAULT));        \
                uaccess_ttbr0_disable();                        \
        }

static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
{
        unsigned long address;
        int rt = ESR_ELx_SYS64_ISS_RT(esr);
        int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
        int ret = 0;

        address = untagged_addr(pt_regs_read_reg(regs, rt));

        switch (crm) {
        case ESR_ELx_SYS64_ISS_CRM_DC_CVAU:     /* DC CVAU, gets promoted */
                __user_cache_maint("dc civac", address, ret);
                break;
        case ESR_ELx_SYS64_ISS_CRM_DC_CVAC:     /* DC CVAC, gets promoted */
                __user_cache_maint("dc civac", address, ret);
                break;
        case ESR_ELx_SYS64_ISS_CRM_DC_CVADP:    /* DC CVADP */
                __user_cache_maint("sys 3, c7, c13, 1", address, ret);
                break;
        case ESR_ELx_SYS64_ISS_CRM_DC_CVAP:     /* DC CVAP */
                __user_cache_maint("sys 3, c7, c12, 1", address, ret);
                break;
        case ESR_ELx_SYS64_ISS_CRM_DC_CIVAC:    /* DC CIVAC */
                __user_cache_maint("dc civac", address, ret);
                break;
        case ESR_ELx_SYS64_ISS_CRM_IC_IVAU:     /* IC IVAU */
                __user_cache_maint("ic ivau", address, ret);
                break;
        default:
                force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
                return;
        }

        if (ret)
                arm64_notify_segfault(address);
        else
                arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

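/*
 * Emulation handlers for system instructions trapped from EL0. Each one
 * emulates its instruction's effect (where there is one) and then steps
 * the PC past the trapped instruction.
 */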
static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
{
        int rt = ESR_ELx_SYS64_ISS_RT(esr);
        unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);

        pt_regs_write_reg(regs, rt, val);

        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
{
        int rt = ESR_ELx_SYS64_ISS_RT(esr);

        pt_regs_write_reg(regs, rt, arch_timer_read_counter());
        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
{
        int rt = ESR_ELx_SYS64_ISS_RT(esr);

        pt_regs_write_reg(regs, rt, arch_timer_get_rate());
        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void mrs_handler(unsigned int esr, struct pt_regs *regs)
{
        u32 sysreg, rt;

        rt = ESR_ELx_SYS64_ISS_RT(esr);
        sysreg = esr_sys64_to_sysreg(esr);

        if (do_emulate_mrs(regs, sysreg, rt) != 0)
                force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
}

static void wfi_handler(unsigned int esr, struct pt_regs *regs)
{
        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

struct sys64_hook {
        unsigned int esr_mask;
        unsigned int esr_val;
        void (*handler)(unsigned int esr, struct pt_regs *regs);
};

static struct sys64_hook sys64_hooks[] = {
        {
                .esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK,
                .esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL,
                .handler = user_cache_maint_handler,
        },
        {
                /* Trap read access to CTR_EL0 */
                .esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
                .esr_val = ESR_ELx_SYS64_ISS_SYS_CTR_READ,
                .handler = ctr_read_handler,
        },
        {
                /* Trap read access to CNTVCT_EL0 */
                .esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
                .esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCT,
                .handler = cntvct_read_handler,
        },
        {
                /* Trap read access to CNTFRQ_EL0 */
                .esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
                .esr_val = ESR_ELx_SYS64_ISS_SYS_CNTFRQ,
                .handler = cntfrq_read_handler,
        },
        {
                /* Trap read access to CPUID registers */
                .esr_mask = ESR_ELx_SYS64_ISS_SYS_MRS_OP_MASK,
                .esr_val = ESR_ELx_SYS64_ISS_SYS_MRS_OP_VAL,
                .handler = mrs_handler,
        },
        {
                /* Trap WFI instructions executed in userspace */
                .esr_mask = ESR_ELx_WFx_MASK,
                .esr_val = ESR_ELx_WFx_WFI_VAL,
                .handler = wfi_handler,
        },
        {},
};

#ifdef CONFIG_COMPAT
#define PSTATE_IT_1_0_SHIFT     25
#define PSTATE_IT_1_0_MASK      (0x3 << PSTATE_IT_1_0_SHIFT)
#define PSTATE_IT_7_2_SHIFT     10
#define PSTATE_IT_7_2_MASK      (0x3f << PSTATE_IT_7_2_SHIFT)

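/*
 * The AArch32 ITSTATE field is split across PSTATE: IT[1:0] live in bits
 * [26:25] and IT[7:2] in bits [15:10]. These helpers reassemble and
 * scatter it.
 */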
static u32 compat_get_it_state(struct pt_regs *regs)
{
        u32 it, pstate = regs->pstate;

        it  = (pstate & PSTATE_IT_1_0_MASK) >> PSTATE_IT_1_0_SHIFT;
        it |= ((pstate & PSTATE_IT_7_2_MASK) >> PSTATE_IT_7_2_SHIFT) << 2;

        return it;
}

static void compat_set_it_state(struct pt_regs *regs, u32 it)
{
        u32 pstate_it;

        pstate_it  = (it << PSTATE_IT_1_0_SHIFT) & PSTATE_IT_1_0_MASK;
        pstate_it |= ((it >> 2) << PSTATE_IT_7_2_SHIFT) & PSTATE_IT_7_2_MASK;

        regs->pstate &= ~PSR_AA32_IT_MASK;
        regs->pstate |= pstate_it;
}

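/*
 * Check whether the trapped CP15 access would have passed its AArch32
 * condition code check, so that a failed instruction can be skipped
 * without being emulated.
 */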
static bool cp15_cond_valid(unsigned int esr, struct pt_regs *regs)
{
        int cond;

        /* Only a T32 instruction can trap without CV being set */
        if (!(esr & ESR_ELx_CV)) {
                u32 it;

                it = compat_get_it_state(regs);
                if (!it)
                        return true;

                cond = it >> 4;
        } else {
                cond = (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
        }

        return aarch32_opcode_cond_checks[cond](regs->pstate);
}

static void advance_itstate(struct pt_regs *regs)
{
        u32 it;

        /* ARM mode, or Thumb with no active IT block: nothing to do */
        if (!(regs->pstate & PSR_AA32_T_BIT) ||
            !(regs->pstate & PSR_AA32_IT_MASK))
                return;

        it  = compat_get_it_state(regs);

        /*
         * If this is the last instruction of the block, wipe the IT
         * state. Otherwise advance it.
         */
        if (!(it & 7))
                it = 0;
        else
                it = (it & 0xe0) | ((it << 1) & 0x1f);

        compat_set_it_state(regs, it);
}

static void arm64_compat_skip_faulting_instruction(struct pt_regs *regs,
                                                   unsigned int sz)
{
        advance_itstate(regs);
        arm64_skip_faulting_instruction(regs, sz);
}

static void compat_cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
{
        int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT;

        pt_regs_write_reg(regs, reg, arch_timer_get_rate());
        arm64_compat_skip_faulting_instruction(regs, 4);
}

static struct sys64_hook cp15_32_hooks[] = {
        {
                .esr_mask = ESR_ELx_CP15_32_ISS_SYS_MASK,
                .esr_val = ESR_ELx_CP15_32_ISS_SYS_CNTFRQ,
                .handler = compat_cntfrq_read_handler,
        },
        {},
};

static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
{
        int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT;
        int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT;
        u64 val = arch_timer_read_counter();

        pt_regs_write_reg(regs, rt, lower_32_bits(val));
        pt_regs_write_reg(regs, rt2, upper_32_bits(val));
        arm64_compat_skip_faulting_instruction(regs, 4);
}

static struct sys64_hook cp15_64_hooks[] = {
        {
                .esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
                .esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCT,
                .handler = compat_cntvct_read_handler,
        },
        {},
};

asmlinkage void __exception do_cp15instr(unsigned int esr, struct pt_regs *regs)
{
        struct sys64_hook *hook, *hook_base;

        if (!cp15_cond_valid(esr, regs)) {
                /*
                 * There is no T16 variant of a CP access, so we
                 * always advance PC by 4 bytes.
                 */
                arm64_compat_skip_faulting_instruction(regs, 4);
                return;
        }

        switch (ESR_ELx_EC(esr)) {
        case ESR_ELx_EC_CP15_32:
                hook_base = cp15_32_hooks;
                break;
        case ESR_ELx_EC_CP15_64:
                hook_base = cp15_64_hooks;
                break;
        default:
                do_undefinstr(regs);
                return;
        }

        for (hook = hook_base; hook->handler; hook++)
                if ((hook->esr_mask & esr) == hook->esr_val) {
                        hook->handler(esr, regs);
                        return;
                }

        /*
         * New cp15 instructions may previously have been undefined at
         * EL0. Fall back to our usual undefined instruction handler
         * so that we handle these consistently.
         */
        do_undefinstr(regs);
}
#endif

asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs)
{
        struct sys64_hook *hook;

        for (hook = sys64_hooks; hook->handler; hook++)
                if ((hook->esr_mask & esr) == hook->esr_val) {
                        hook->handler(esr, regs);
                        return;
                }

        /*
         * New SYS instructions may previously have been undefined at EL0. Fall
         * back to our usual undefined instruction handler so that we handle
         * these consistently.
         */
        do_undefinstr(regs);
}

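/*
 * Human-readable names for the ESR_ELx exception classes, used when
 * reporting unhandled or fatal exceptions.
 */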
static const char *esr_class_str[] = {
        [0 ... ESR_ELx_EC_MAX]          = "UNRECOGNIZED EC",
        [ESR_ELx_EC_UNKNOWN]            = "Unknown/Uncategorized",
        [ESR_ELx_EC_WFx]                = "WFI/WFE",
        [ESR_ELx_EC_CP15_32]            = "CP15 MCR/MRC",
        [ESR_ELx_EC_CP15_64]            = "CP15 MCRR/MRRC",
        [ESR_ELx_EC_CP14_MR]            = "CP14 MCR/MRC",
        [ESR_ELx_EC_CP14_LS]            = "CP14 LDC/STC",
        [ESR_ELx_EC_FP_ASIMD]           = "ASIMD",
        [ESR_ELx_EC_CP10_ID]            = "CP10 MRC/VMRS",
        [ESR_ELx_EC_CP14_64]            = "CP14 MCRR/MRRC",
        [ESR_ELx_EC_ILL]                = "PSTATE.IL",
        [ESR_ELx_EC_SVC32]              = "SVC (AArch32)",
        [ESR_ELx_EC_HVC32]              = "HVC (AArch32)",
        [ESR_ELx_EC_SMC32]              = "SMC (AArch32)",
        [ESR_ELx_EC_SVC64]              = "SVC (AArch64)",
        [ESR_ELx_EC_HVC64]              = "HVC (AArch64)",
        [ESR_ELx_EC_SMC64]              = "SMC (AArch64)",
        [ESR_ELx_EC_SYS64]              = "MSR/MRS (AArch64)",
        [ESR_ELx_EC_SVE]                = "SVE",
        [ESR_ELx_EC_IMP_DEF]            = "EL3 IMP DEF",
        [ESR_ELx_EC_IABT_LOW]           = "IABT (lower EL)",
        [ESR_ELx_EC_IABT_CUR]           = "IABT (current EL)",
        [ESR_ELx_EC_PC_ALIGN]           = "PC Alignment",
        [ESR_ELx_EC_DABT_LOW]           = "DABT (lower EL)",
        [ESR_ELx_EC_DABT_CUR]           = "DABT (current EL)",
        [ESR_ELx_EC_SP_ALIGN]           = "SP Alignment",
        [ESR_ELx_EC_FP_EXC32]           = "FP (AArch32)",
        [ESR_ELx_EC_FP_EXC64]           = "FP (AArch64)",
        [ESR_ELx_EC_SERROR]             = "SError",
        [ESR_ELx_EC_BREAKPT_LOW]        = "Breakpoint (lower EL)",
        [ESR_ELx_EC_BREAKPT_CUR]        = "Breakpoint (current EL)",
        [ESR_ELx_EC_SOFTSTP_LOW]        = "Software Step (lower EL)",
        [ESR_ELx_EC_SOFTSTP_CUR]        = "Software Step (current EL)",
        [ESR_ELx_EC_WATCHPT_LOW]        = "Watchpoint (lower EL)",
        [ESR_ELx_EC_WATCHPT_CUR]        = "Watchpoint (current EL)",
        [ESR_ELx_EC_BKPT32]             = "BKPT (AArch32)",
        [ESR_ELx_EC_VECTOR32]           = "Vector catch (AArch32)",
        [ESR_ELx_EC_BRK64]              = "BRK (AArch64)",
};

const char *esr_get_class_string(u32 esr)
{
        return esr_class_str[ESR_ELx_EC(esr)];
}

/*
 * bad_mode handles the impossible case in the exception vector. This is always
 * fatal.
 */
asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
{
        console_verbose();

        pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
                handler[reason], smp_processor_id(), esr,
                esr_get_class_string(esr));

        local_daif_mask();
        panic("bad mode");
}

/*
 * bad_el0_sync handles unexpected, but potentially recoverable synchronous
 * exceptions taken from EL0. Unlike bad_mode, this returns.
 */
asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
{
        void __user *pc = (void __user *)instruction_pointer(regs);

        current->thread.fault_address = 0;
        current->thread.fault_code = esr;

        arm64_force_sig_fault(SIGILL, ILL_ILLOPC, pc,
                              "Bad EL0 synchronous exception");
}

#ifdef CONFIG_VMAP_STACK

DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
        __aligned(16);

asmlinkage void handle_bad_stack(struct pt_regs *regs)
{
        unsigned long tsk_stk = (unsigned long)current->stack;
        unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
        unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
        unsigned int esr = read_sysreg(esr_el1);
        unsigned long far = read_sysreg(far_el1);

        console_verbose();
        pr_emerg("Insufficient stack space to handle exception!\n");

        pr_emerg("ESR: 0x%08x -- %s\n", esr, esr_get_class_string(esr));
        pr_emerg("FAR: 0x%016lx\n", far);

        pr_emerg("Task stack:     [0x%016lx..0x%016lx]\n",
                 tsk_stk, tsk_stk + THREAD_SIZE);
        pr_emerg("IRQ stack:      [0x%016lx..0x%016lx]\n",
                 irq_stk, irq_stk + THREAD_SIZE);
        pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
                 ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);

        __show_regs(regs);

        /*
         * We use nmi_panic to limit the potential for recursive overflows,
         * and to get a better stack trace.
         */
        nmi_panic(NULL, "kernel stack overflow");
        cpu_park_loop();
}
#endif

void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr)
{
        console_verbose();

        pr_crit("SError Interrupt on CPU%d, code 0x%08x -- %s\n",
                smp_processor_id(), esr, esr_get_class_string(esr));
        if (regs)
                __show_regs(regs);

        nmi_panic(regs, "Asynchronous SError Interrupt");

        cpu_park_loop();
        unreachable();
}

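/*
 * Use the AET severity field of a RAS SError's ESR to decide whether the
 * error is survivable or must be treated as fatal.
 */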
bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
{
        u32 aet = arm64_ras_serror_get_severity(esr);

        switch (aet) {
        case ESR_ELx_AET_CE:    /* corrected error */
        case ESR_ELx_AET_UEO:   /* restartable, not yet consumed */
                /*
                 * The CPU can make progress. We may take UEO again as
                 * a more severe error.
                 */
                return false;

        case ESR_ELx_AET_UEU:   /* Uncorrected Unrecoverable */
        case ESR_ELx_AET_UER:   /* Uncorrected Recoverable */
                /*
                 * The CPU can't make progress. The exception may have
                 * been imprecise.
                 */
                return true;

        case ESR_ELx_AET_UC:    /* Uncontainable or Uncategorized error */
        default:
                /* Error has been silently propagated */
                arm64_serror_panic(regs, esr);
        }
}

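/*
 * SErrors are handled in NMI context: enter it here unless the exception
 * was already taken from within an NMI.
 */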
asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr)
{
        const bool was_in_nmi = in_nmi();

        if (!was_in_nmi)
                nmi_enter();

        /* non-RAS errors are not containable */
        if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
                arm64_serror_panic(regs, esr);

        if (!was_in_nmi)
                nmi_exit();
}

void __pte_error(const char *file, int line, unsigned long val)
{
        pr_err("%s:%d: bad pte %016lx.\n", file, line, val);
}

void __pmd_error(const char *file, int line, unsigned long val)
{
        pr_err("%s:%d: bad pmd %016lx.\n", file, line, val);
}

void __pud_error(const char *file, int line, unsigned long val)
{
        pr_err("%s:%d: bad pud %016lx.\n", file, line, val);
}

void __pgd_error(const char *file, int line, unsigned long val)
{
        pr_err("%s:%d: bad pgd %016lx.\n", file, line, val);
}

/* GENERIC_BUG traps */

int is_valid_bugaddr(unsigned long addr)
{
        /*
         * bug_handler() is only called for BRK #BUG_BRK_IMM, so the answer
         * is trivial: any spurious instances with no bug table entry will
         * be rejected by report_bug() and passed back to the debug-monitors
         * code, which handles them as a fatal unexpected debug exception.
         */
        return 1;
}

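/*
 * BRK handler for BUG()/WARN(). report_bug() classifies the trap; a BUG
 * is fatal, while a WARN lets the thread skip the BRK and continue.
 */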
static int bug_handler(struct pt_regs *regs, unsigned int esr)
{
        switch (report_bug(regs->pc, regs)) {
        case BUG_TRAP_TYPE_BUG:
                die("Oops - BUG", regs, 0);
                break;

        case BUG_TRAP_TYPE_WARN:
                break;

        default:
                /* unknown/unrecognised bug trap type */
                return DBG_HOOK_ERROR;
        }

        /* If thread survives, skip over the BUG instruction and continue: */
        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
        return DBG_HOOK_HANDLED;
}

static struct break_hook bug_break_hook = {
        .fn = bug_handler,
        .imm = BUG_BRK_IMM,
};

#ifdef CONFIG_KASAN_SW_TAGS

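/*
 * Tag-based KASAN encodes its report details in the BRK immediate:
 * bit 5 = recoverable, bit 4 = write access, bits 3:0 = log2 of the
 * access size.
 */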
#define KASAN_ESR_RECOVER       0x20
#define KASAN_ESR_WRITE         0x10
#define KASAN_ESR_SIZE_MASK     0x0f
#define KASAN_ESR_SIZE(esr)     (1 << ((esr) & KASAN_ESR_SIZE_MASK))

static int kasan_handler(struct pt_regs *regs, unsigned int esr)
{
        bool recover = esr & KASAN_ESR_RECOVER;
        bool write = esr & KASAN_ESR_WRITE;
        size_t size = KASAN_ESR_SIZE(esr);
        u64 addr = regs->regs[0];
        u64 pc = regs->pc;

        kasan_report(addr, size, write, pc);

        /*
         * The instrumentation allows us to control whether we can proceed
         * after a crash is detected, via the -recover flag passed to the
         * compiler. Disabling recovery lets the compiler generate more
         * compact code.
         *
         * Unfortunately disabling recovery doesn't work for the kernel right
         * now. KASAN reporting is disabled in some contexts (for example when
         * the allocator accesses slab object metadata; this is controlled by
         * current->kasan_depth). All these accesses are detected by the tool,
         * even though the reports for them are not printed.
         *
         * This is something that might be fixed at some point in the future.
         */
        if (!recover)
                die("Oops - KASAN", regs, 0);

        /* If the thread survives, skip over the brk instruction and continue: */
        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
        return DBG_HOOK_HANDLED;
}

static struct break_hook kasan_break_hook = {
        .fn     = kasan_handler,
        .imm    = KASAN_BRK_IMM,
        .mask   = KASAN_BRK_MASK,
};
#endif

/*
 * Initial handler for AArch64 BRK exceptions.
 * This handler is only used until debug_traps_init().
 */
int __init early_brk64(unsigned long addr, unsigned int esr,
                struct pt_regs *regs)
{
#ifdef CONFIG_KASAN_SW_TAGS
        unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;

        if ((comment & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
                return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
        return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
}

/* This registration must happen early, before debug_traps_init(). */
void __init trap_init(void)
{
        register_kernel_break_hook(&bug_break_hook);
#ifdef CONFIG_KASAN_SW_TAGS
        register_kernel_break_hook(&kasan_break_hook);
#endif
}