Commit | Line | Data |
---|---|---|
60ffc30d CM |
1 | /* |
2 | * Based on arch/arm/kernel/traps.c | |
3 | * | |
4 | * Copyright (C) 1995-2009 Russell King | |
5 | * Copyright (C) 2012 ARM Ltd. | |
6 | * | |
7 | * This program is free software; you can redistribute it and/or modify | |
8 | * it under the terms of the GNU General Public License version 2 as | |
9 | * published by the Free Software Foundation. | |
10 | * | |
11 | * This program is distributed in the hope that it will be useful, | |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
14 | * GNU General Public License for more details. | |
15 | * | |
16 | * You should have received a copy of the GNU General Public License | |
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | |
18 | */ | |
19 | ||
9fb7410f | 20 | #include <linux/bug.h> |
60ffc30d CM |
21 | #include <linux/signal.h> |
22 | #include <linux/personality.h> | |
23 | #include <linux/kallsyms.h> | |
24 | #include <linux/spinlock.h> | |
25 | #include <linux/uaccess.h> | |
26 | #include <linux/hardirq.h> | |
27 | #include <linux/kdebug.h> | |
28 | #include <linux/module.h> | |
29 | #include <linux/kexec.h> | |
30 | #include <linux/delay.h> | |
31 | #include <linux/init.h> | |
32 | #include <linux/sched.h> | |
33 | #include <linux/syscalls.h> | |
34 | ||
35 | #include <asm/atomic.h> | |
9fb7410f | 36 | #include <asm/bug.h> |
1442b6ed | 37 | #include <asm/debug-monitors.h> |
60a1f02c | 38 | #include <asm/esr.h> |
9fb7410f | 39 | #include <asm/insn.h> |
60ffc30d | 40 | #include <asm/traps.h> |
a9ea0017 | 41 | #include <asm/stack_pointer.h> |
60ffc30d CM |
42 | #include <asm/stacktrace.h> |
43 | #include <asm/exception.h> | |
44 | #include <asm/system_misc.h> | |
7dd01aef | 45 | #include <asm/sysreg.h> |
60ffc30d CM |
46 | |
/*
 * Human-readable names for the four exception vector classes, indexed by
 * the 'reason' value passed to bad_mode().  Declared *const as well: the
 * table is only ever read (handler[reason]), so the pointers themselves
 * should live in read-only data rather than being writable.
 */
static const char *const handler[] = {
	"Synchronous Abort",
	"IRQ",
	"FIQ",
	"Error"
};
53 | ||
/*
 * Non-zero: report otherwise-unhandled fatal signals.  Consulted (via
 * show_unhandled_signals_ratelimited(), defined elsewhere) by the
 * reporting paths below; presumably also exposed as a sysctl — confirm
 * against kernel/sysctl.c.
 */
int show_unhandled_signals = 1;
55 | ||
/*
 * Dump out the contents of some kernel memory nicely...
 *
 * Prints the range [bottom, top) in rows of 32 bytes (four 8-byte words),
 * each prefixed with the low 16 bits of the row's start address.  Words
 * that cannot be read are shown as "????????????????".
 */
static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
		     unsigned long top)
{
	unsigned long first;
	mm_segment_t fs;
	int i;

	/*
	 * We need to switch to kernel mode so that we can use __get_user
	 * to safely read from kernel space.
	 */
	fs = get_fs();
	set_fs(KERNEL_DS);

	printk("%s%s(0x%016lx to 0x%016lx)\n", lvl, str, bottom, top);

	/* Rows are 32-byte aligned; the first row may start before 'bottom'. */
	for (first = bottom & ~31; first < top; first += 32) {
		unsigned long p;
		/* NB: this local 'str' intentionally shadows the parameter. */
		char str[sizeof(" 12345678") * 8 + 1];

		memset(str, ' ', sizeof(str));
		str[sizeof(str) - 1] = '\0';

		for (p = first, i = 0; i < (32 / 8)
					&& p < top; i++, p += 8) {
			/* Only print words actually inside [bottom, top). */
			if (p >= bottom && p < top) {
				unsigned long val;

				/* 17 = strlen(" %016lx") output per word. */
				if (__get_user(val, (unsigned long *)p) == 0)
					sprintf(str + i * 17, " %016lx", val);
				else
					sprintf(str + i * 17, " ????????????????");
			}
		}
		printk("%s%04lx:%s\n", lvl, first & 0xffff, str);
	}

	/* Restore the caller's address limit. */
	set_fs(fs);
}
98 | ||
/* Print a single backtrace entry: address plus resolved symbol name. */
static void dump_backtrace_entry(unsigned long where)
{
	/*
	 * Note that 'where' can have a physical address, but it's not handled.
	 */
	print_ip_sym(where);
}
106 | ||
/*
 * Print the instruction stream around the faulting PC: the four 32-bit
 * words preceding it and the faulting word itself, the latter wrapped in
 * parentheses.  Caller must have set the appropriate address limit (see
 * dump_instr()) so that __get_user() may be used on the PC.
 */
static void __dump_instr(const char *lvl, struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	/* Room for 5 words of "00000000 " plus "()" plus NUL. */
	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
	int i;

	/* i = -4..0: four instructions before the PC, then the PC itself. */
	for (i = -4; i < 1; i++) {
		unsigned int val, bad;

		bad = __get_user(val, &((u32 *)addr)[i]);

		if (!bad)
			p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
		else {
			/* Unreadable word: give up on the rest of the line. */
			p += sprintf(p, "bad PC value");
			break;
		}
	}
	printk("%sCode: %s\n", lvl, str);
}
60ffc30d | 127 | |
c5cea06b MR |
128 | static void dump_instr(const char *lvl, struct pt_regs *regs) |
129 | { | |
130 | if (!user_mode(regs)) { | |
131 | mm_segment_t fs = get_fs(); | |
132 | set_fs(KERNEL_DS); | |
133 | __dump_instr(lvl, regs); | |
134 | set_fs(fs); | |
135 | } else { | |
136 | __dump_instr(lvl, regs); | |
137 | } | |
60ffc30d CM |
138 | } |
139 | ||
/*
 * Walk and print the call stack of @tsk (or of 'current' when @tsk is
 * NULL).  When @regs is non-NULL, entries are skipped until the frame
 * matching regs->regs[29] is reached, so the trace starts at the point of
 * the exception rather than inside the dump machinery itself.
 */
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
	struct stackframe frame;
	unsigned long irq_stack_ptr;
	int skip;

	pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

	if (!tsk)
		tsk = current;

	/* Pin the task's stack so it cannot be freed mid-walk. */
	if (!try_get_task_stack(tsk))
		return;

	/*
	 * Switching between stacks is valid when tracing current and in
	 * non-preemptible context.
	 */
	if (tsk == current && !preemptible())
		irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
	else
		irq_stack_ptr = 0;

	if (tsk == current) {
		/* Start unwinding from this very function's frame. */
		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.sp = current_stack_pointer;
		frame.pc = (unsigned long)dump_backtrace;
	} else {
		/*
		 * task blocked in __switch_to
		 */
		frame.fp = thread_saved_fp(tsk);
		frame.sp = thread_saved_sp(tsk);
		frame.pc = thread_saved_pc(tsk);
	}
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	frame.graph = tsk->curr_ret_stack;
#endif

	/* Only skip leading frames when a register snapshot was supplied. */
	skip = !!regs;
	printk("Call trace:\n");
	while (1) {
		unsigned long where = frame.pc;
		unsigned long stack;
		int ret;

		/* skip until specified stack frame */
		if (!skip) {
			dump_backtrace_entry(where);
		} else if (frame.fp == regs->regs[29]) {
			skip = 0;
			/*
			 * Mostly, this is the case where this function is
			 * called in panic/abort. As exception handler's
			 * stack frame does not contain the corresponding pc
			 * at which an exception has taken place, use regs->pc
			 * instead.
			 */
			dump_backtrace_entry(regs->pc);
		}
		ret = unwind_frame(tsk, &frame);
		if (ret < 0)
			break;
		stack = frame.sp;
		if (in_exception_text(where)) {
			/*
			 * If we switched to the irq_stack before calling this
			 * exception handler, then the pt_regs will be on the
			 * task stack. The easiest way to tell is if the large
			 * pt_regs would overlap with the end of the irq_stack.
			 */
			if (stack < irq_stack_ptr &&
			    (stack + sizeof(struct pt_regs)) > irq_stack_ptr)
				stack = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr);

			dump_mem("", "Exception stack", stack,
				 stack + sizeof(struct pt_regs));
		}
	}

	/* Drop the reference taken by try_get_task_stack(). */
	put_task_stack(tsk);
}
222 | ||
/*
 * Generic show_stack() entry point.  The @sp argument is accepted for
 * interface compatibility but ignored; the unwind always starts from the
 * task's saved state (or the current frame).
 */
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	dump_backtrace(NULL, tsk);
	/* Keep the trace output ordered with respect to surrounding prints. */
	barrier();
}
228 | ||
229 | #ifdef CONFIG_PREEMPT | |
230 | #define S_PREEMPT " PREEMPT" | |
231 | #else | |
232 | #define S_PREEMPT "" | |
233 | #endif | |
60ffc30d | 234 | #define S_SMP " SMP" |
60ffc30d | 235 | |
/*
 * Print the full oops report: banner, registers, modules and (for kernel
 * faults) stack contents, backtrace and code bytes.  Returns the value of
 * the DIE_OOPS notifier chain so die() can decide whether to kill the task.
 * Caller (die()) holds die_lock and has bumped console verbosity.
 */
static int __die(const char *str, int err, struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	static int die_counter;	/* distinguishes repeated oopses in one boot */
	int ret;

	pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
		 str, err, ++die_counter);

	/* trap and error numbers are mostly meaningless on ARM */
	ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV);
	if (ret == NOTIFY_STOP)
		return ret;

	print_modules();
	__show_regs(regs);
	pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
		 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
		 end_of_stack(tsk));

	/* Stack/backtrace/code dumps are only meaningful for kernel faults. */
	if (!user_mode(regs)) {
		dump_mem(KERN_EMERG, "Stack: ", regs->sp,
			 THREAD_SIZE + (unsigned long)task_stack_page(tsk));
		dump_backtrace(regs, tsk);
		dump_instr(KERN_EMERG, regs);
	}

	return ret;
}
265 | ||
266 | static DEFINE_RAW_SPINLOCK(die_lock); | |
267 | ||
/*
 * This function is protected against re-entrancy.
 *
 * Standard oops path: serialize on die_lock, print the report via __die(),
 * optionally hand off to kexec/crash, then either panic (in interrupt or
 * with panic_on_oops) or kill the current task.  Statement order matters
 * throughout — do not reorder.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
	int ret;

	oops_enter();

	raw_spin_lock_irq(&die_lock);
	console_verbose();
	bust_spinlocks(1);
	ret = __die(str, err, regs);

	/* Jump into the crash kernel (does not return) if one is loaded. */
	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	raw_spin_unlock_irq(&die_lock);
	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	/* Unless a notifier claimed the oops, the task cannot continue. */
	if (ret != NOTIFY_STOP)
		do_exit(SIGSEGV);
}
297 | ||
298 | void arm64_notify_die(const char *str, struct pt_regs *regs, | |
299 | struct siginfo *info, int err) | |
300 | { | |
9141300a CM |
301 | if (user_mode(regs)) { |
302 | current->thread.fault_address = 0; | |
303 | current->thread.fault_code = err; | |
60ffc30d | 304 | force_sig_info(info->si_signo, info, current); |
9141300a | 305 | } else { |
60ffc30d | 306 | die(str, regs, err); |
9141300a | 307 | } |
60ffc30d CM |
308 | } |
309 | ||
9b79f52d PA |
310 | static LIST_HEAD(undef_hook); |
311 | static DEFINE_RAW_SPINLOCK(undef_lock); | |
312 | ||
/*
 * Add @hook to the list consulted by call_undef_hook() when a user task
 * executes an undefined instruction.  Safe against concurrent traversal
 * via undef_lock.
 */
void register_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_add(&hook->node, &undef_hook);
	raw_spin_unlock_irqrestore(&undef_lock, flags);
}
321 | ||
/* Remove a hook previously added with register_undef_hook(). */
void unregister_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_del(&hook->node);
	raw_spin_unlock_irqrestore(&undef_lock, flags);
}
330 | ||
/*
 * Fetch the undefined instruction at the user PC and offer it to each
 * registered hook whose instruction/pstate masks match.  Returns the
 * matching hook's result, or 1 (unhandled) when the fault is from kernel
 * mode, the instruction cannot be read, or no hook matches.
 */
static int call_undef_hook(struct pt_regs *regs)
{
	struct undef_hook *hook;
	unsigned long flags;
	u32 instr;
	int (*fn)(struct pt_regs *regs, u32 instr) = NULL;
	void __user *pc = (void __user *)instruction_pointer(regs);

	/* Hooks only apply to user-mode undef traps. */
	if (!user_mode(regs))
		return 1;

	if (compat_thumb_mode(regs)) {
		/* 16-bit Thumb instruction */
		if (get_user(instr, (u16 __user *)pc))
			goto exit;
		instr = le16_to_cpu(instr);
		if (aarch32_insn_is_wide(instr)) {
			/* 32-bit Thumb-2: fetch and append the second halfword. */
			u32 instr2;

			if (get_user(instr2, (u16 __user *)(pc + 2)))
				goto exit;
			instr2 = le16_to_cpu(instr2);
			instr = (instr << 16) | instr2;
		}
	} else {
		/* 32-bit ARM instruction */
		if (get_user(instr, (u32 __user *)pc))
			goto exit;
		instr = le32_to_cpu(instr);
	}

	/* Last matching hook wins; fn is called after dropping the lock. */
	raw_spin_lock_irqsave(&undef_lock, flags);
	list_for_each_entry(hook, &undef_hook, node)
		if ((instr & hook->instr_mask) == hook->instr_val &&
		    (regs->pstate & hook->pstate_mask) == hook->pstate_val)
			fn = hook->fn;

	raw_spin_unlock_irqrestore(&undef_lock, flags);
exit:
	return fn ? fn(regs, instr) : 1;
}
372 | ||
390bf177 AP |
373 | static void force_signal_inject(int signal, int code, struct pt_regs *regs, |
374 | unsigned long address) | |
60ffc30d CM |
375 | { |
376 | siginfo_t info; | |
377 | void __user *pc = (void __user *)instruction_pointer(regs); | |
390bf177 AP |
378 | const char *desc; |
379 | ||
380 | switch (signal) { | |
381 | case SIGILL: | |
382 | desc = "undefined instruction"; | |
383 | break; | |
384 | case SIGSEGV: | |
385 | desc = "illegal memory access"; | |
386 | break; | |
387 | default: | |
388 | desc = "bad mode"; | |
389 | break; | |
390 | } | |
391 | ||
392 | if (unhandled_signal(current, signal) && | |
393 | show_unhandled_signals_ratelimited()) { | |
394 | pr_info("%s[%d]: %s: pc=%p\n", | |
395 | current->comm, task_pid_nr(current), desc, pc); | |
396 | dump_instr(KERN_INFO, regs); | |
397 | } | |
398 | ||
399 | info.si_signo = signal; | |
400 | info.si_errno = 0; | |
401 | info.si_code = code; | |
402 | info.si_addr = pc; | |
403 | ||
404 | arm64_notify_die(desc, regs, &info, 0); | |
405 | } | |
406 | ||
/*
 * Set up process info to signal segmentation fault - called on access error.
 *
 * The VMA lookup only chooses between SEGV_MAPERR (no mapping at @addr)
 * and SEGV_ACCERR (mapping exists but access was not permitted); the
 * actual signal delivery happens in force_signal_inject().
 */
void arm64_notify_segfault(struct pt_regs *regs, unsigned long addr)
{
	int code;

	down_read(&current->mm->mmap_sem);
	if (find_vma(current->mm, addr) == NULL)
		code = SEGV_MAPERR;
	else
		code = SEGV_ACCERR;
	up_read(&current->mm->mmap_sem);

	force_signal_inject(SIGSEGV, code, regs, addr);
}
423 | ||
/*
 * Undefined-instruction exception entry point.  Tries, in order: the
 * AArch32 breakpoint handler, then any registered undef hooks; if neither
 * claims the instruction, SIGILL is injected (or the kernel dies for a
 * kernel-mode undef — see force_signal_inject/arm64_notify_die).
 */
asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
{
	/* check for AArch32 breakpoint instructions */
	if (!aarch32_break_handler(regs))
		return;

	if (call_undef_hook(regs) == 0)
		return;

	force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
}
435 | ||
/*
 * Clear SCTLR_EL1.UCI on this CPU so that EL0 cache maintenance
 * instructions trap to EL1 (where user_cache_maint_handler() emulates
 * them).  Always succeeds.
 */
int cpu_enable_cache_maint_trap(void *__unused)
{
	config_sctlr_el1(SCTLR_EL1_UCI, 0);
	return 0;
}
441 | ||
/*
 * Perform the cache maintenance instruction 'insn' on the user address
 * 'address', storing 0 or -EFAULT in 'res'.  Addresses at/above the user
 * limit fail immediately; otherwise a fixup entry catches any fault taken
 * by the instruction itself.  uaccess_ttbr0_enable/disable bracket the
 * access for kernels using TTBR0 PAN emulation.
 */
#define __user_cache_maint(insn, address, res)			\
	if (untagged_addr(address) >= user_addr_max()) {	\
		res = -EFAULT;					\
	} else {						\
		uaccess_ttbr0_enable();				\
		asm volatile (					\
			"1: " insn ", %1\n"			\
			" mov %w0, #0\n"			\
			"2:\n"					\
			" .pushsection .fixup,\"ax\"\n"		\
			" .align 2\n"				\
			"3: mov %w0, %w2\n"			\
			" b 2b\n"				\
			" .popsection\n"			\
			_ASM_EXTABLE(1b, 3b)			\
			: "=r" (res)				\
			: "r" (address), "i" (-EFAULT));	\
		uaccess_ttbr0_disable();			\
	}
7dd01aef | 461 | |
9dbd5bb2 | 462 | static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs) |
7dd01aef AP |
463 | { |
464 | unsigned long address; | |
9dbd5bb2 SP |
465 | int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT; |
466 | int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT; | |
467 | int ret = 0; | |
7dd01aef | 468 | |
8b6e70fc | 469 | address = pt_regs_read_reg(regs, rt); |
7dd01aef | 470 | |
9dbd5bb2 SP |
471 | switch (crm) { |
472 | case ESR_ELx_SYS64_ISS_CRM_DC_CVAU: /* DC CVAU, gets promoted */ | |
473 | __user_cache_maint("dc civac", address, ret); | |
474 | break; | |
475 | case ESR_ELx_SYS64_ISS_CRM_DC_CVAC: /* DC CVAC, gets promoted */ | |
476 | __user_cache_maint("dc civac", address, ret); | |
477 | break; | |
478 | case ESR_ELx_SYS64_ISS_CRM_DC_CIVAC: /* DC CIVAC */ | |
479 | __user_cache_maint("dc civac", address, ret); | |
480 | break; | |
481 | case ESR_ELx_SYS64_ISS_CRM_IC_IVAU: /* IC IVAU */ | |
482 | __user_cache_maint("ic ivau", address, ret); | |
483 | break; | |
484 | default: | |
7dd01aef AP |
485 | force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0); |
486 | return; | |
487 | } | |
488 | ||
489 | if (ret) | |
490 | arm64_notify_segfault(regs, address); | |
491 | else | |
492 | regs->pc += 4; | |
493 | } | |
494 | ||
/*
 * Emulate a trapped EL0 read of CTR_EL0: write the sanitised feature-
 * register value into the destination register encoded in the syndrome,
 * then step past the MRS instruction.
 */
static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
{
	int rt = (esr & ESR_ELx_SYS64_ISS_RT_MASK) >> ESR_ELx_SYS64_ISS_RT_SHIFT;
	unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);

	pt_regs_write_reg(regs, rt, val);

	regs->pc += 4;
}
504 | ||
9dbd5bb2 SP |
/*
 * Dispatch entry for trapped EL0 system instructions: a handler is chosen
 * when (esr & esr_mask) == esr_val.
 */
struct sys64_hook {
	unsigned int esr_mask;
	unsigned int esr_val;
	void (*handler)(unsigned int esr, struct pt_regs *regs);
};

/* Table scanned in order by do_sysinstr(); terminated by the empty entry. */
static struct sys64_hook sys64_hooks[] = {
	{
		.esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL,
		.handler = user_cache_maint_handler,
	},
	{
		/* Trap read access to CTR_EL0 */
		.esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
		.esr_val = ESR_ELx_SYS64_ISS_SYS_CTR_READ,
		.handler = ctr_read_handler,
	},
	{},
};
525 | ||
526 | asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs) | |
527 | { | |
528 | struct sys64_hook *hook; | |
529 | ||
530 | for (hook = sys64_hooks; hook->handler; hook++) | |
531 | if ((hook->esr_mask & esr) == hook->esr_val) { | |
532 | hook->handler(esr, regs); | |
533 | return; | |
534 | } | |
535 | ||
49f6cba6 MR |
536 | /* |
537 | * New SYS instructions may previously have been undefined at EL0. Fall | |
538 | * back to our usual undefined instruction handler so that we handle | |
539 | * these consistently. | |
540 | */ | |
541 | do_undefinstr(regs); | |
9dbd5bb2 SP |
542 | } |
543 | ||
60ffc30d CM |
544 | long compat_arm_syscall(struct pt_regs *regs); |
545 | ||
/*
 * Handler for syscall numbers with no implementation.  Compat (AArch32)
 * tasks first get a chance at the private ARM syscall range; anything
 * still unhandled is logged (rate-limited) and returns -ENOSYS via
 * sys_ni_syscall().
 */
asmlinkage long do_ni_syscall(struct pt_regs *regs)
{
#ifdef CONFIG_COMPAT
	long ret;
	if (is_compat_task()) {
		ret = compat_arm_syscall(regs);
		if (ret != -ENOSYS)
			return ret;
	}
#endif

	if (show_unhandled_signals_ratelimited()) {
		pr_info("%s[%d]: syscall %d\n", current->comm,
			task_pid_nr(current), (int)regs->syscallno);
		dump_instr("", regs);
		if (user_mode(regs))
			__show_regs(regs);
	}

	return sys_ni_syscall();
}
567 | ||
60a1f02c MR |
/*
 * Human-readable names for the ESR_ELx exception class (EC) field,
 * indexed by EC value.  Unlisted values resolve to "UNRECOGNIZED EC" via
 * the range designator.
 */
static const char *esr_class_str[] = {
	[0 ... ESR_ELx_EC_MAX]		= "UNRECOGNIZED EC",
	[ESR_ELx_EC_UNKNOWN]		= "Unknown/Uncategorized",
	[ESR_ELx_EC_WFx]		= "WFI/WFE",
	[ESR_ELx_EC_CP15_32]		= "CP15 MCR/MRC",
	[ESR_ELx_EC_CP15_64]		= "CP15 MCRR/MRRC",
	[ESR_ELx_EC_CP14_MR]		= "CP14 MCR/MRC",
	[ESR_ELx_EC_CP14_LS]		= "CP14 LDC/STC",
	[ESR_ELx_EC_FP_ASIMD]		= "ASIMD",
	[ESR_ELx_EC_CP10_ID]		= "CP10 MRC/VMRS",
	[ESR_ELx_EC_CP14_64]		= "CP14 MCRR/MRRC",
	[ESR_ELx_EC_ILL]		= "PSTATE.IL",
	[ESR_ELx_EC_SVC32]		= "SVC (AArch32)",
	[ESR_ELx_EC_HVC32]		= "HVC (AArch32)",
	[ESR_ELx_EC_SMC32]		= "SMC (AArch32)",
	[ESR_ELx_EC_SVC64]		= "SVC (AArch64)",
	[ESR_ELx_EC_HVC64]		= "HVC (AArch64)",
	[ESR_ELx_EC_SMC64]		= "SMC (AArch64)",
	[ESR_ELx_EC_SYS64]		= "MSR/MRS (AArch64)",
	[ESR_ELx_EC_IMP_DEF]		= "EL3 IMP DEF",
	[ESR_ELx_EC_IABT_LOW]		= "IABT (lower EL)",
	[ESR_ELx_EC_IABT_CUR]		= "IABT (current EL)",
	[ESR_ELx_EC_PC_ALIGN]		= "PC Alignment",
	[ESR_ELx_EC_DABT_LOW]		= "DABT (lower EL)",
	[ESR_ELx_EC_DABT_CUR]		= "DABT (current EL)",
	[ESR_ELx_EC_SP_ALIGN]		= "SP Alignment",
	[ESR_ELx_EC_FP_EXC32]		= "FP (AArch32)",
	[ESR_ELx_EC_FP_EXC64]		= "FP (AArch64)",
	[ESR_ELx_EC_SERROR]		= "SError",
	[ESR_ELx_EC_BREAKPT_LOW]	= "Breakpoint (lower EL)",
	[ESR_ELx_EC_BREAKPT_CUR]	= "Breakpoint (current EL)",
	[ESR_ELx_EC_SOFTSTP_LOW]	= "Software Step (lower EL)",
	[ESR_ELx_EC_SOFTSTP_CUR]	= "Software Step (current EL)",
	[ESR_ELx_EC_WATCHPT_LOW]	= "Watchpoint (lower EL)",
	[ESR_ELx_EC_WATCHPT_CUR]	= "Watchpoint (current EL)",
	[ESR_ELx_EC_BKPT32]		= "BKPT (AArch32)",
	[ESR_ELx_EC_VECTOR32]		= "Vector catch (AArch32)",
	[ESR_ELx_EC_BRK64]		= "BRK (AArch64)",
};
607 | ||
/*
 * Map a raw ESR value to the name of its exception class.  ESR_ELx_EC()
 * extracts the EC field, so the index is always within esr_class_str[].
 */
const char *esr_get_class_string(u32 esr)
{
	return esr_class_str[ESR_ELx_EC(esr)];
}
612 | ||
/*
 * bad_mode handles the impossible case in the exception vector. This is always
 * fatal.
 */
asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
{
	console_verbose();

	pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
		handler[reason], smp_processor_id(), esr,
		esr_get_class_string(esr));

	die("Oops - bad mode", regs, 0);
	/* die() should not return here; make sure we stop regardless. */
	local_irq_disable();
	panic("bad mode");
}
629 | ||
630 | /* | |
631 | * bad_el0_sync handles unexpected, but potentially recoverable synchronous | |
632 | * exceptions taken from EL0. Unlike bad_mode, this returns. | |
633 | */ | |
634 | asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr) | |
635 | { | |
636 | siginfo_t info; | |
637 | void __user *pc = (void __user *)instruction_pointer(regs); | |
638 | console_verbose(); | |
639 | ||
640 | pr_crit("Bad EL0 synchronous exception detected on CPU%d, code 0x%08x -- %s\n", | |
641 | smp_processor_id(), esr, esr_get_class_string(esr)); | |
9955ac47 MR |
642 | __show_regs(regs); |
643 | ||
644 | info.si_signo = SIGILL; | |
645 | info.si_errno = 0; | |
646 | info.si_code = ILL_ILLOPC; | |
647 | info.si_addr = pc; | |
60ffc30d | 648 | |
7d9e8f71 MR |
649 | current->thread.fault_address = 0; |
650 | current->thread.fault_code = 0; | |
651 | ||
652 | force_sig_info(info.si_signo, &info, current); | |
60ffc30d CM |
653 | } |
654 | ||
/* Report a corrupt PTE value; called from page-table validation macros. */
void __pte_error(const char *file, int line, unsigned long val)
{
	pr_err("%s:%d: bad pte %016lx.\n", file, line, val);
}

/* Report a corrupt PMD value. */
void __pmd_error(const char *file, int line, unsigned long val)
{
	pr_err("%s:%d: bad pmd %016lx.\n", file, line, val);
}

/* Report a corrupt PUD value. */
void __pud_error(const char *file, int line, unsigned long val)
{
	pr_err("%s:%d: bad pud %016lx.\n", file, line, val);
}

/* Report a corrupt PGD value. */
void __pgd_error(const char *file, int line, unsigned long val)
{
	pr_err("%s:%d: bad pgd %016lx.\n", file, line, val);
}
674 | ||
/* GENERIC_BUG traps */

int is_valid_bugaddr(unsigned long addr)
{
	/*
	 * bug_handler() only called for BRK #BUG_BRK_IMM.
	 * So the answer is trivial -- any spurious instances with no
	 * bug table entry will be rejected by report_bug() and passed
	 * back to the debug-monitors code and handled as a fatal
	 * unexpected debug exception.
	 */
	return 1;
}
688 | ||
/*
 * Break-hook callback for BRK #BUG_BRK_IMM, i.e. a compiled-in BUG() or
 * WARN().  Returns DBG_HOOK_HANDLED after stepping over the BRK, or
 * DBG_HOOK_ERROR for user-mode hits and unrecognised trap types (which
 * then fall back to the generic debug-exception handling).
 */
static int bug_handler(struct pt_regs *regs, unsigned int esr)
{
	/* BUG()/WARN() only occur in kernel text; user BRKs aren't ours. */
	if (user_mode(regs))
		return DBG_HOOK_ERROR;

	switch (report_bug(regs->pc, regs)) {
	case BUG_TRAP_TYPE_BUG:
		die("Oops - BUG", regs, 0);
		break;

	case BUG_TRAP_TYPE_WARN:
		/* Ideally, report_bug() should backtrace for us... but no. */
		dump_backtrace(regs, NULL);
		break;

	default:
		/* unknown/unrecognised bug trap type */
		return DBG_HOOK_ERROR;
	}

	/* If thread survives, skip over the BUG instruction and continue: */
	regs->pc += AARCH64_INSN_SIZE;	/* skip BRK and resume */
	return DBG_HOOK_HANDLED;
}
713 | ||
/* Match only BRK #BUG_BRK_IMM (0xf2000000 is the BRK64 ESR base value). */
static struct break_hook bug_break_hook = {
	.esr_val = 0xf2000000 | BUG_BRK_IMM,
	.esr_mask = 0xffffffff,
	.fn = bug_handler,
};
719 | ||
720 | /* | |
721 | * Initial handler for AArch64 BRK exceptions | |
722 | * This handler only used until debug_traps_init(). | |
723 | */ | |
724 | int __init early_brk64(unsigned long addr, unsigned int esr, | |
725 | struct pt_regs *regs) | |
726 | { | |
727 | return bug_handler(regs, esr) != DBG_HOOK_HANDLED; | |
728 | } | |
729 | ||
/* This registration must happen early, before debug_traps_init(). */
void __init trap_init(void)
{
	/* Hook BRK #BUG_BRK_IMM so BUG()/WARN() work from boot onwards. */
	register_break_hook(&bug_break_hook);
}