Commit | Line | Data |
---|---|---|
d2912cb1 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
1da177e4 LT |
2 | /* |
3 | * linux/arch/arm/kernel/traps.c | |
4 | * | |
ab72b007 | 5 | * Copyright (C) 1995-2009 Russell King |
1da177e4 LT |
6 | * Fragments that appear the same as linux/arch/i386/kernel/traps.c (C) Linus Torvalds |
7 | * | |
1da177e4 LT |
8 | * 'traps.c' handles hardware exceptions after we have saved some state in |
9 | * 'linux/arch/arm/lib/traps.S'. Mostly a debugging aid, but will probably | |
10 | * kill the offending process. | |
11 | */ | |
1da177e4 | 12 | #include <linux/signal.h> |
1da177e4 | 13 | #include <linux/personality.h> |
1da177e4 | 14 | #include <linux/kallsyms.h> |
a9221de6 RK |
15 | #include <linux/spinlock.h> |
16 | #include <linux/uaccess.h> | |
67306da6 | 17 | #include <linux/hardirq.h> |
a9221de6 | 18 | #include <linux/kdebug.h> |
eb0146da | 19 | #include <linux/kprobes.h> |
a9221de6 RK |
20 | #include <linux/module.h> |
21 | #include <linux/kexec.h> | |
87e040b6 | 22 | #include <linux/bug.h> |
a9221de6 | 23 | #include <linux/delay.h> |
1da177e4 | 24 | #include <linux/init.h> |
3f07c014 | 25 | #include <linux/sched/signal.h> |
b17b0153 | 26 | #include <linux/sched/debug.h> |
68db0cf1 | 27 | #include <linux/sched/task_stack.h> |
c0e7f7ee | 28 | #include <linux/irq.h> |
0069455b | 29 | #include <linux/vmalloc.h> |
1da177e4 | 30 | |
60063497 | 31 | #include <linux/atomic.h> |
1da177e4 | 32 | #include <asm/cacheflush.h> |
5a567d78 | 33 | #include <asm/exception.h> |
b9baf5c8 | 34 | #include <asm/spectre.h> |
1da177e4 LT |
35 | #include <asm/unistd.h> |
36 | #include <asm/traps.h> | |
49432d4a | 37 | #include <asm/ptrace.h> |
bff595c1 | 38 | #include <asm/unwind.h> |
f159f4ed | 39 | #include <asm/tls.h> |
8cdfdf7f | 40 | #include <asm/stacktrace.h> |
9f97da78 | 41 | #include <asm/system_misc.h> |
a79a0cb1 | 42 | #include <asm/opcodes.h> |
1da177e4 | 43 | |
49432d4a | 44 | |
/*
 * Human-readable names for the exception classes, indexed by the
 * "reason" code passed in from the assembly entry code (see bad_mode()).
 */
static const char *handler[]= {
	"prefetch abort",
	"data abort",
	"address exception",
	"interrupt",
	"undefined instruction",
};
1da177e4 | 52 | |
/* Kernel-side mapping of the vector page; set in early_trap_init(). */
void *vectors_page;
54 | ||
#ifdef CONFIG_DEBUG_USER
/* Bitmask of UDBG_* flags selecting which user faults get verbose dumps. */
unsigned int user_debug;

/* Parse the "user_debug=" kernel command-line option into user_debug. */
static int __init user_debug_setup(char *str)
{
	get_option(&str, &user_debug);
	return 1;
}
__setup("user_debug=", user_debug_setup);
#endif
65 | ||
/*
 * Print one backtrace entry and, when the entry lies in the kernel's
 * exception entry code, dump the saved exception stack frame as well.
 *
 * @where:  address of the function entered
 * @from:   address the call came from
 * @frame:  frame pointer value for this entry
 * @loglvl: printk log level prefix (e.g. KERN_EMERG)
 */
void dump_backtrace_entry(unsigned long where, unsigned long from,
			  unsigned long frame, const char *loglvl)
{
	unsigned long end = frame + 4 + sizeof(struct pt_regs);

	if (IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER) &&
	    IS_ENABLED(CONFIG_CC_IS_GCC) &&
	    end > ALIGN(frame, THREAD_SIZE)) {
		/*
		 * If we are walking past the end of the stack, it may be due
		 * to the fact that we are on an IRQ or overflow stack. In this
		 * case, we can load the address of the other stack from the
		 * frame record.
		 */
		frame = ((unsigned long *)frame)[-2] - 4;
		end = frame + 4 + sizeof(struct pt_regs);
	}

#ifndef CONFIG_KALLSYMS
	printk("%sFunction entered at [<%08lx>] from [<%08lx>]\n",
		loglvl, where, from);
#elif defined CONFIG_BACKTRACE_VERBOSE
	printk("%s[<%08lx>] (%ps) from [<%08lx>] (%pS)\n",
		loglvl, where, (void *)where, from, (void *)from);
#else
	printk("%s %ps from %pS\n", loglvl, (void *)where, (void *)from);
#endif

	/* A frame created in the exception entry code has pt_regs above it. */
	if (in_entry_text(from) && end <= ALIGN(frame, THREAD_SIZE))
		dump_mem(loglvl, "Exception stack", frame + 4, end);
}
97 | ||
/*
 * Decode and print the registers saved by a function-entry STM,
 * six per output line, highest-numbered register first.
 *
 * @stack:       points at the last (highest-numbered) register stored
 * @instruction: the STM opcode; its low bits are the register mask
 * @loglvl:      printk log level prefix
 */
void dump_backtrace_stm(u32 *stack, u32 instruction, const char *loglvl)
{
	char str[80], *p;
	unsigned int x;
	int reg;

	for (reg = 10, x = 0, p = str; reg >= 0; reg--) {
		if (instruction & BIT(reg)) {
			p += sprintf(p, " r%d:%08x", reg, *stack--);
			if (++x == 6) {
				x = 0;
				p = str;
				printk("%s%s\n", loglvl, str);
			}
		}
	}
	/* Flush a partially filled final line, if any. */
	if (p != str)
		printk("%s%s\n", loglvl, str);
}
117 | ||
bff595c1 | 118 | #ifndef CONFIG_ARM_UNWIND |
1da177e4 LT |
119 | /* |
120 | * Stack pointers should always be within the kernels view of | |
121 | * physical memory. If it is not there, then we can't dump | |
122 | * out any information relating to the stack. | |
123 | */ | |
124 | static int verify_stack(unsigned long sp) | |
125 | { | |
09d9bae0 | 126 | if (sp < PAGE_OFFSET || |
a1c510d0 AB |
127 | (!IS_ENABLED(CONFIG_VMAP_STACK) && |
128 | sp > (unsigned long)high_memory && high_memory != NULL)) | |
1da177e4 LT |
129 | return -EFAULT; |
130 | ||
131 | return 0; | |
132 | } | |
bff595c1 | 133 | #endif |
1da177e4 LT |
134 | |
/*
 * Dump out the contents of some memory nicely...
 *
 * Prints 32 bytes per row; each word is read via get_kernel_nofault()
 * so an unmapped address prints "????????" instead of faulting.
 *
 * @lvl:    printk log level prefix
 * @str:    caption printed before the dump
 * @bottom: first address to dump
 * @top:    one past the last address to dump
 */
void dump_mem(const char *lvl, const char *str, unsigned long bottom,
	      unsigned long top)
{
	unsigned long first;
	int i;

	printk("%s%s(0x%08lx to 0x%08lx)\n", lvl, str, bottom, top);

	/* Walk 32-byte-aligned rows that cover [bottom, top). */
	for (first = bottom & ~31; first < top; first += 32) {
		unsigned long p;
		char str[sizeof(" 12345678") * 8 + 1];

		memset(str, ' ', sizeof(str));
		str[sizeof(str) - 1] = '\0';

		for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
			if (p >= bottom && p < top) {
				unsigned long val;
				if (!get_kernel_nofault(val, (unsigned long *)p))
					sprintf(str + i * 9, " %08lx", val);
				else
					sprintf(str + i * 9, " ????????");
			}
		}
		printk("%s%04lx:%s\n", lvl, first & 0xffff, str);
	}
}
165 | ||
/*
 * Dump the instructions around the faulting PC: four before, the
 * faulting one (shown in parentheses) and, for Thumb, one after.
 * User-mode text is fetched with get_user(), kernel text with
 * get_kernel_nofault().
 */
static void dump_instr(const char *lvl, struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);
	const int thumb = thumb_mode(regs);
	const int width = thumb ? 4 : 8;	/* hex digits per opcode */
	char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
	int i;

	/*
	 * Note that we now dump the code first, just in case the backtrace
	 * kills us.
	 */

	for (i = -4; i < 1 + !!thumb; i++) {
		unsigned int val, bad;

		if (thumb) {
			u16 tmp;

			if (user_mode(regs))
				bad = get_user(tmp, &((u16 __user *)addr)[i]);
			else
				bad = get_kernel_nofault(tmp, &((u16 *)addr)[i]);

			val = __mem_to_opcode_thumb16(tmp);
		} else {
			if (user_mode(regs))
				bad = get_user(val, &((u32 __user *)addr)[i]);
			else
				bad = get_kernel_nofault(val, &((u32 *)addr)[i]);

			val = __mem_to_opcode_arm(val);
		}

		if (!bad)
			p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
					width, val);
		else {
			p += sprintf(p, "bad PC value");
			break;
		}
	}
	printk("%sCode: %s\n", lvl, str);
}
1da177e4 | 210 | |
#ifdef CONFIG_ARM_UNWIND
/* With the EABI unwinder configured, delegate entirely to it. */
void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		    const char *loglvl)
{
	unwind_backtrace(regs, tsk, loglvl);
}
#else
/*
 * Frame-pointer based backtrace.  The starting frame pointer and
 * processor mode come from @regs if given, from @tsk's saved context
 * if it is not the current task, or from the live fp register
 * otherwise; the walk itself is done by c_backtrace().
 */
void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
		    const char *loglvl)
{
	unsigned int fp, mode;
	int ok = 1;

	printk("%sCall trace: ", loglvl);

	if (!tsk)
		tsk = current;

	if (regs) {
		fp = frame_pointer(regs);
		mode = processor_mode(regs);
	} else if (tsk != current) {
		fp = thread_saved_fp(tsk);
		mode = 0x10;	/* no exception context: report USR mode */
	} else {
		asm("mov %0, fp" : "=r" (fp) : : "cc");
		mode = 0x10;
	}

	/* Sanity-check the frame pointer before walking it. */
	if (!fp) {
		pr_cont("no frame pointer");
		ok = 0;
	} else if (verify_stack(fp)) {
		pr_cont("invalid frame pointer 0x%08x", fp);
		ok = 0;
	} else if (fp < (unsigned long)end_of_stack(tsk))
		pr_cont("frame pointer underflow");
	pr_cont("\n");

	if (ok)
		c_backtrace(fp, mode, loglvl);
}
#endif
1da177e4 | 254 | |
/*
 * Arch backend for the generic show_stack().  NOTE(review): barrier()
 * appears intended to stop the compiler tail-calling dump_backtrace()
 * so this frame stays on the stack — confirm against entry code.
 */
void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	dump_backtrace(NULL, tsk, loglvl);
	barrier();
}
260 | ||
/* Configuration fragments appended to the oops banner in __die(). */
#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#elif defined(CONFIG_PREEMPT_RT)
#define S_PREEMPT " PREEMPT_RT"
#else
#define S_PREEMPT ""
#endif
#ifdef CONFIG_SMP
#define S_SMP " SMP"
#else
#define S_SMP ""
#endif
#ifdef CONFIG_THUMB2_KERNEL
#define S_ISA " THUMB2"
#else
#define S_ISA " ARM"
#endif
d9202429 | 278 | |
/*
 * Print the oops report: banner, modules, registers, process info and,
 * for kernel-mode faults, the stack, backtrace and code dumps.
 *
 * Returns 1 if a die notifier handled the event (the caller should not
 * kill the task), 0 otherwise.
 */
static int __die(const char *str, int err, struct pt_regs *regs)
{
	struct task_struct *tsk = current;
	static int die_counter;
	int ret;

	pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP S_ISA "\n",
	         str, err, ++die_counter);

	/* trap and error numbers are mostly meaningless on ARM */
	ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV);
	if (ret == NOTIFY_STOP)
		return 1;

	print_modules();
	__show_regs(regs);
	__show_regs_alloc_free(regs);
	pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
		 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk));

	if (!user_mode(regs) || in_interrupt()) {
		dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
			 ALIGN(regs->ARM_sp - THREAD_SIZE, THREAD_ALIGN)
			 + THREAD_SIZE);
		dump_backtrace(regs, tsk, KERN_EMERG);
		dump_instr(KERN_EMERG, regs);
	}

	return 0;
}
1da177e4 | 309 | |
/*
 * Serialises oops reports across CPUs.  A raw arch spinlock is used
 * because ordinary locking may already be wrecked when we oops.
 */
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;		/* CPU currently holding die_lock */
static unsigned int die_nest_count;	/* nesting depth of oopses */

/*
 * Enter oops processing: disable interrupts, take die_lock (allowing
 * recursion on the owning CPU) and make the console verbose.
 * Returns the saved irq flags to pass to oops_end().
 */
static unsigned long oops_begin(void)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	return flags;
}
a9221de6 | 336 | |
/*
 * Leave oops processing: release die_lock once nesting unwinds,
 * restore irqs, then kexec/panic/kill as configuration dictates.
 *
 * @signr: signal to terminate the current task with, or 0 to return.
 */
static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
	if (regs && kexec_should_crash(current))
		crash_kexec(regs);

	bust_spinlocks(0);
	die_owner = -1;
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		arch_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);
	oops_exit();

	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception");
	if (signr)
		make_task_dead(signr);
}
359 | ||
/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
	enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
	unsigned long flags = oops_begin();
	int sig = SIGSEGV;

	/* A kernel-mode fault may actually be a BUG()/WARN() trap. */
	if (!user_mode(regs))
		bug_type = report_bug(regs->ARM_pc, regs);
	if (bug_type != BUG_TRAP_TYPE_NONE)
		str = "Oops - BUG";

	/* A die notifier claiming the event means: do not kill the task. */
	if (__die(str, err, regs))
		sig = 0;

	oops_end(flags, regs, sig);
}
379 | ||
/*
 * Deliver a fault to the offending context: a signal with siginfo for
 * user mode, an oops via die() for kernel mode.
 */
void arm_notify_die(const char *str, struct pt_regs *regs,
		    int signo, int si_code, void __user *addr,
		    unsigned long err, unsigned long trap)
{
	if (user_mode(regs)) {
		current->thread.error_code = err;
		current->thread.trap_no = trap;

		force_sig_fault(signo, si_code, addr);
	} else {
		die(str, regs, err);
	}
}
393 | ||
#ifdef CONFIG_GENERIC_BUG

/*
 * Report whether @pc points at the BUG() trap instruction.  The word
 * is read with get_kernel_nofault() so a bogus pc simply returns 0.
 */
int is_valid_bugaddr(unsigned long pc)
{
#ifdef CONFIG_THUMB2_KERNEL
	u16 bkpt;
	u16 insn = __opcode_to_mem_thumb16(BUG_INSTR_VALUE);
#else
	u32 bkpt;
	u32 insn = __opcode_to_mem_arm(BUG_INSTR_VALUE);
#endif

	if (get_kernel_nofault(bkpt, (void *)pc))
		return 0;

	return bkpt == insn;
}

#endif
413 | ||
/* Registered undefined-instruction handlers, guarded by undef_lock. */
static LIST_HEAD(undef_hook);
static DEFINE_RAW_SPINLOCK(undef_lock);
1da177e4 LT |
416 | |
/* Add @hook to the list consulted by do_undefinstr(). */
void register_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_add(&hook->node, &undef_hook);
	raw_spin_unlock_irqrestore(&undef_lock, flags);
}
425 | ||
/* Remove @hook from the undefined-instruction handler list. */
void unregister_undef_hook(struct undef_hook *hook)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_del(&hook->node);
	raw_spin_unlock_irqrestore(&undef_lock, flags);
}
434 | ||
/*
 * Find a hook matching @instr and the current CPSR and invoke it
 * (the last match in list order wins).  Returns the hook's result
 * (0 = handled) or 1 when no hook matched.  nokprobe_inline because
 * this runs on the kprobes breakpoint path.
 */
static nokprobe_inline
int call_undef_hook(struct pt_regs *regs, unsigned int instr)
{
	struct undef_hook *hook;
	unsigned long flags;
	int (*fn)(struct pt_regs *regs, unsigned int instr) = NULL;

	raw_spin_lock_irqsave(&undef_lock, flags);
	list_for_each_entry(hook, &undef_hook, node)
		if ((instr & hook->instr_mask) == hook->instr_val &&
		    (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val)
			fn = hook->fn;
	raw_spin_unlock_irqrestore(&undef_lock, flags);

	return fn ? fn(regs, instr) : 1;
}
451 | ||
/*
 * Undefined instruction exception handler.  Reads the faulting opcode
 * (handling ARM vs Thumb encodings and kernel vs user mode), offers it
 * to the registered undef hooks, and raises SIGILL (or oopses for
 * kernel mode, via arm_notify_die) if nobody claims it.
 */
asmlinkage void do_undefinstr(struct pt_regs *regs)
{
	unsigned int instr;
	void __user *pc;

	pc = (void __user *)instruction_pointer(regs);

	if (processor_mode(regs) == SVC_MODE) {
		/* Kernel mode: the instruction can be read directly. */
#ifdef CONFIG_THUMB2_KERNEL
		if (thumb_mode(regs)) {
			instr = __mem_to_opcode_thumb16(((u16 *)pc)[0]);
			if (is_wide_instruction(instr)) {
				u16 inst2;
				inst2 = __mem_to_opcode_thumb16(((u16 *)pc)[1]);
				instr = __opcode_thumb32_compose(instr, inst2);
			}
		} else
#endif
			instr = __mem_to_opcode_arm(*(u32 *) pc);
	} else if (thumb_mode(regs)) {
		/* User-mode Thumb: one or two 16-bit halfwords. */
		if (get_user(instr, (u16 __user *)pc))
			goto die_sig;
		instr = __mem_to_opcode_thumb16(instr);
		if (is_wide_instruction(instr)) {
			unsigned int instr2;
			if (get_user(instr2, (u16 __user *)pc+1))
				goto die_sig;
			instr2 = __mem_to_opcode_thumb16(instr2);
			instr = __opcode_thumb32_compose(instr, instr2);
		}
	} else {
		/* User-mode ARM: a single 32-bit word. */
		if (get_user(instr, (u32 __user *)pc))
			goto die_sig;
		instr = __mem_to_opcode_arm(instr);
	}

	if (call_undef_hook(regs, instr) == 0)
		return;

die_sig:
#ifdef CONFIG_DEBUG_USER
	if (user_debug & UDBG_UNDEFINED) {
		pr_info("%s (%d): undefined instruction: pc=%px\n",
			current->comm, task_pid_nr(current), pc);
		__show_regs(regs);
		dump_instr(KERN_INFO, regs);
	}
#endif
	arm_notify_die("Oops - undefined instruction", regs,
		       SIGILL, ILL_ILLOPC, pc, 0, 6);
}
NOKPROBE_SYMBOL(do_undefinstr)
1da177e4 | 504 | |
/*
 * Handle FIQ similarly to NMI on x86 systems.
 *
 * The runtime environment for NMIs is extremely restrictive
 * (NMIs can pre-empt critical sections meaning almost all locking is
 * forbidden) meaning this default FIQ handling must only be used in
 * circumstances where non-maskability improves robustness, such as
 * watchdog or debug logic.
 *
 * This handler is not appropriate for general purpose use in drivers
 * platform code and can be overridden using set_fiq_handler.
 */
asmlinkage void __exception_irq_entry handle_fiq_as_nmi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	nmi_enter();

	/* nop. FIQ handlers for special arch/arm features can be added here. */

	nmi_exit();

	set_irq_regs(old_regs);
}
529 | ||
/*
 * bad_mode handles the impossible case in the vectors. If you see one of
 * these, then it's extremely serious, and could mean you have buggy hardware.
 * It never returns, and never tries to sync. We hope that we can at least
 * dump out some state information...
 */
asmlinkage void bad_mode(struct pt_regs *regs, int reason)
{
	console_verbose();

	/* @reason indexes the handler[] name table above. */
	pr_crit("Bad mode in %s handler detected\n", handler[reason]);

	die("Oops - bad mode", regs, 0);
	local_irq_disable();
	panic("bad mode");
}
546 | ||
/*
 * An out-of-range SWI was issued.  Non-Linux personalities get a plain
 * SIGSEGV; Linux personalities get SIGILL via arm_notify_die(), with
 * the fault address pointing back at the SWI instruction itself.
 */
static int bad_syscall(int n, struct pt_regs *regs)
{
	if ((current->personality & PER_MASK) != PER_LINUX) {
		send_sig(SIGSEGV, current, 1);
		return regs->ARM_r0;
	}

#ifdef CONFIG_DEBUG_USER
	if (user_debug & UDBG_SYSCALL) {
		pr_err("[%d] %s: obsolete system call %08x.\n",
			task_pid_nr(current), current->comm, n);
		dump_instr(KERN_ERR, regs);
	}
#endif

	arm_notify_die("Oops - bad syscall", regs, SIGILL, ILL_ILLTRP,
		       (void __user *)instruction_pointer(regs) -
			 (thumb_mode(regs) ? 2 : 4),
		       n, 0);

	return regs->ARM_r0;
}
569 | ||
/*
 * Flush the instruction cache for the user range [start, end) one page
 * at a time so a huge range cannot monopolise the CPU, rescheduling
 * between chunks and bailing out early on a pending fatal signal.
 */
static inline int
__do_cache_op(unsigned long start, unsigned long end)
{
	int ret;

	do {
		unsigned long chunk = min(PAGE_SIZE, end - start);

		if (fatal_signal_pending(current))
			return 0;

		ret = flush_icache_user_range(start, start + chunk);
		if (ret)
			return ret;

		cond_resched();
		start += chunk;
	} while (start < end);

	return 0;
}
591 | ||
/*
 * Validate and perform the cacheflush private syscall: @flags must be
 * zero and the range must be ordered and within user-accessible space.
 */
static inline int
do_cache_op(unsigned long start, unsigned long end, int flags)
{
	if (end < start || flags)
		return -EINVAL;

	if (!access_ok((void __user *)start, end - start))
		return -EFAULT;

	return __do_cache_op(start, end);
}
603 | ||
/*
 * Handle all unrecognised system calls.
 * 0x9f0000 - 0x9fffff are some more esoteric system calls
 */
#define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
asmlinkage int arm_syscall(int no, struct pt_regs *regs)
{
	/* Anything outside the ARM private 0x9fxxxx range is bad. */
	if ((no >> 16) != (__ARM_NR_BASE>> 16))
		return bad_syscall(no, regs);

	switch (no & 0xffff) {
	case 0: /* branch through 0 */
		arm_notify_die("branch through zero", regs,
			       SIGSEGV, SEGV_MAPERR, NULL, 0, 0);
		return 0;

	case NR(breakpoint): /* SWI BREAK_POINT */
		/* Rewind the PC onto the SWI before reporting the trap. */
		regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
		ptrace_break(regs);
		return regs->ARM_r0;

	/*
	 * Flush a region from virtual address 'r0' to virtual address 'r1'
	 * _exclusive_. There is no alignment requirement on either address;
	 * user space does not need to know the hardware cache layout.
	 *
	 * r2 contains flags. It should ALWAYS be passed as ZERO until it
	 * is defined to be something else. For now we ignore it, but may
	 * the fires of hell burn in your belly if you break this rule. ;)
	 *
	 * (at a later date, we may want to allow this call to not flush
	 * various aspects of the cache. Passing '0' will guarantee that
	 * everything necessary gets flushed to maintain consistency in
	 * the specified region).
	 */
	case NR(cacheflush):
		return do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2);

	case NR(usr26):
		if (!(elf_hwcap & HWCAP_26BIT))
			break;
		regs->ARM_cpsr &= ~MODE32_BIT;
		return regs->ARM_r0;

	case NR(usr32):
		if (!(elf_hwcap & HWCAP_26BIT))
			break;
		regs->ARM_cpsr |= MODE32_BIT;
		return regs->ARM_r0;

	case NR(set_tls):
		set_tls(regs->ARM_r0);
		return 0;

	case NR(get_tls):
		return current_thread_info()->tp_value[0];

	default:
		/* Calls 9f00xx..9f07ff are defined to return -ENOSYS
		   if not implemented, rather than raising SIGILL. This
		   way the calling program can gracefully determine whether
		   a feature is supported. */
		if ((no & 0xffff) <= 0x7ff)
			return -ENOSYS;
		break;
	}
#ifdef CONFIG_DEBUG_USER
	/*
	 * experience shows that these seem to indicate that
	 * something catastrophic has happened
	 */
	if (user_debug & UDBG_SYSCALL) {
		pr_err("[%d] %s: arm syscall %d\n",
		       task_pid_nr(current), current->comm, no);
		dump_instr(KERN_ERR, regs);
		if (user_mode(regs)) {
			__show_regs(regs);
			c_backtrace(frame_pointer(regs), processor_mode(regs), KERN_ERR);
		}
	}
#endif
	arm_notify_die("Oops - bad syscall(2)", regs, SIGILL, ILL_ILLTRP,
		       (void __user *)instruction_pointer(regs) -
			 (thumb_mode(regs) ? 2 : 4),
		       no, 0);
	return 0;
}
691 | ||
#ifdef CONFIG_TLS_REG_EMUL

/*
 * We might be running on an ARMv6+ processor which should have the TLS
 * register but for some reason we can't use it, or maybe an SMP system
 * using a pre-ARMv6 processor (there are apparently a few prototypes like
 * that in existence) and therefore access to that register must be
 * emulated.
 */

/* Emulate one trapped TLS-read mrc: write tp_value into the destination
 * register and step the PC past the 4-byte instruction. */
static int get_tp_trap(struct pt_regs *regs, unsigned int instr)
{
	int reg = (instr >> 12) & 15;
	if (reg == 15)
		return 1;	/* destination is pc: leave it unhandled */
	regs->uregs[reg] = current_thread_info()->tp_value[0];
	regs->ARM_pc += 4;
	return 0;
}

/* Matches the ARM-state mrc that reads the TLS register (cpsr T bit clear). */
static struct undef_hook arm_mrc_hook = {
	.instr_mask	= 0x0fff0fff,
	.instr_val	= 0x0e1d0f70,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= 0,
	.fn		= get_tp_trap,
};

static int __init arm_mrc_hook_init(void)
{
	register_undef_hook(&arm_mrc_hook);
	return 0;
}

late_initcall(arm_mrc_hook_init);

#endif
729 | ||
/*
 * A data abort trap was taken, but we did not handle the instruction.
 * Try to abort the user program, or panic if it was the kernel.
 */
asmlinkage void
baddataabort(int code, unsigned long instr, struct pt_regs *regs)
{
	unsigned long addr = instruction_pointer(regs);

#ifdef CONFIG_DEBUG_USER
	if (user_debug & UDBG_BADABORT) {
		pr_err("8<--- cut here ---\n");
		pr_err("[%d] %s: bad data abort: code %d instr 0x%08lx\n",
		       task_pid_nr(current), current->comm, code, instr);
		dump_instr(KERN_ERR, regs);
		show_pte(KERN_ERR, current->mm, addr);
	}
#endif

	arm_notify_die("unknown data abort code", regs,
		       SIGILL, ILL_ILLOPC, (void __user *)addr, instr, 0);
}
752 | ||
/* Called from stubbed-out I/O accessors; always fatal. */
void __readwrite_bug(const char *fn)
{
	pr_err("%s called, but not implemented\n", fn);
	BUG();
}
EXPORT_SYMBOL(__readwrite_bug);
759 | ||
#ifdef CONFIG_MMU
/* Reporters for corrupt page-table entries, called from the pXd_ERROR
 * macros in the page-table headers. */
void __pte_error(const char *file, int line, pte_t pte)
{
	pr_err("%s:%d: bad pte %08llx.\n", file, line, (long long)pte_val(pte));
}

void __pmd_error(const char *file, int line, pmd_t pmd)
{
	pr_err("%s:%d: bad pmd %08llx.\n", file, line, (long long)pmd_val(pmd));
}

void __pgd_error(const char *file, int line, pgd_t pgd)
{
	pr_err("%s:%d: bad pgd %08llx.\n", file, line, (long long)pgd_val(pgd));
}
#endif
1da177e4 LT |
776 | |
777 | asmlinkage void __div0(void) | |
778 | { | |
4ed89f22 | 779 | pr_err("Division by zero in kernel.\n"); |
1da177e4 LT |
780 | dump_stack(); |
781 | } | |
782 | EXPORT_SYMBOL(__div0); | |
783 | ||
/* Kernel abort(): BUG, and panic if that somehow returns. */
void abort(void)
{
	BUG();

	/* if that doesn't kill us, halt */
	panic("Oops failed to kill thread");
}
1da177e4 | 791 | |
#ifdef CONFIG_KUSER_HELPERS
/*
 * Install the kuser helpers (from entry-armv.S) at the top of the
 * vector page so user space can call them at fixed addresses.
 */
static void __init kuser_init(void *vectors)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;

	memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);

	/*
	 * vectors + 0xfe0 = __kuser_get_tls
	 * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
	 */
	if (tls_emu || has_tls_reg)
		memcpy(vectors + 0xfe0, vectors + 0xfe8, 4);
}
#else
static inline void __init kuser_init(void *vectors)
{
}
#endif
f159f4ed | 812 | |
04e91b73 RKO |
813 | #ifndef CONFIG_CPU_V7M |
814 | static void copy_from_lma(void *vma, void *lma_start, void *lma_end) | |
815 | { | |
816 | memcpy(vma, lma_start, lma_end - lma_start); | |
817 | } | |
818 | ||
819 | static void flush_vectors(void *vma, size_t offset, size_t size) | |
820 | { | |
821 | unsigned long start = (unsigned long)vma + offset; | |
822 | unsigned long end = start + size; | |
823 | ||
824 | flush_icache_range(start, end); | |
825 | } | |
826 | ||
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
/*
 * Switch the exception vectors to a Spectre-BHB hardened variant.
 *
 * @method: SPECTRE_V2_METHOD_LOOP8 or SPECTRE_V2_METHOD_BPIALL.
 *
 * Returns SPECTRE_MITIGATED on success, or SPECTRE_VULNERABLE when the
 * method is unknown or initmem has already been freed (too late to
 * patch the vectors safely).
 */
int spectre_bhb_update_vectors(unsigned int method)
{
	extern char __vectors_bhb_bpiall_start[], __vectors_bhb_bpiall_end[];
	extern char __vectors_bhb_loop8_start[], __vectors_bhb_loop8_end[];
	void *vec_start, *vec_end;

	if (system_state >= SYSTEM_FREEING_INITMEM) {
		pr_err("CPU%u: Spectre BHB workaround too late - system vulnerable\n",
		       smp_processor_id());
		return SPECTRE_VULNERABLE;
	}

	switch (method) {
	case SPECTRE_V2_METHOD_LOOP8:
		vec_start = __vectors_bhb_loop8_start;
		vec_end = __vectors_bhb_loop8_end;
		break;

	case SPECTRE_V2_METHOD_BPIALL:
		vec_start = __vectors_bhb_bpiall_start;
		vec_end = __vectors_bhb_bpiall_end;
		break;

	default:
		pr_err("CPU%u: unknown Spectre BHB state %d\n",
		       smp_processor_id(), method);
		return SPECTRE_VULNERABLE;
	}

	copy_from_lma(vectors_page, vec_start, vec_end);
	flush_vectors(vectors_page, 0, vec_end - vec_start);

	return SPECTRE_MITIGATED;
}
#endif
863 | ||
/*
 * Populate the vectors page (mapped at 0xffff0000) early in boot:
 * poison it, copy in the exception vectors and their stubs, install
 * the kuser helpers, and flush the result to the instruction stream.
 */
void __init early_trap_init(void *vectors_base)
{
	extern char __stubs_start[], __stubs_end[];
	extern char __vectors_start[], __vectors_end[];
	unsigned i;

	/* Remember the page so spectre_bhb_update_vectors() can repatch it. */
	vectors_page = vectors_base;

	/*
	 * Poison the vectors page with an undefined instruction. This
	 * instruction is chosen to be undefined for both ARM and Thumb
	 * ISAs. The Thumb version is an undefined instruction with a
	 * branch back to the undefined instruction.
	 */
	for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
		((u32 *)vectors_base)[i] = 0xe7fddef1;

	/*
	 * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
	 * into the vector page, mapped at 0xffff0000, and ensure these
	 * are visible to the instruction stream.
	 */
	copy_from_lma(vectors_base, __vectors_start, __vectors_end);
	/* The stubs live one page above the vectors (fixed +0x1000 offset). */
	copy_from_lma(vectors_base + 0x1000, __stubs_start, __stubs_end);

	kuser_init(vectors_base);

	/* Two pages were written: the vectors page and the stubs page. */
	flush_vectors(vectors_base, 0, PAGE_SIZE * 2);
}
55bdd694 | 893 | #else /* ifndef CONFIG_CPU_V7M */ |
04e91b73 RKO |
/* V7-M variant: intentionally empty, see comment below. */
void __init early_trap_init(void *vectors_base)
{
	/*
	 * on V7-M there is no need to copy the vector table to a dedicated
	 * memory area. The address is configurable and so a table in the kernel
	 * image can be used.
	 */
}
04e91b73 | 902 | #endif |
a1c510d0 AB |
903 | |
904 | #ifdef CONFIG_VMAP_STACK | |
905 | ||
906 | DECLARE_PER_CPU(u8 *, irq_stack_ptr); | |
907 | ||
908 | asmlinkage DEFINE_PER_CPU(u8 *, overflow_stack_ptr); | |
909 | ||
910 | static int __init allocate_overflow_stacks(void) | |
911 | { | |
912 | u8 *stack; | |
913 | int cpu; | |
914 | ||
915 | for_each_possible_cpu(cpu) { | |
916 | stack = (u8 *)__get_free_page(GFP_KERNEL); | |
917 | if (WARN_ON(!stack)) | |
918 | return -ENOMEM; | |
919 | per_cpu(overflow_stack_ptr, cpu) = &stack[OVERFLOW_STACK_SIZE]; | |
920 | } | |
921 | return 0; | |
922 | } | |
923 | early_initcall(allocate_overflow_stacks); | |
924 | ||
925 | asmlinkage void handle_bad_stack(struct pt_regs *regs) | |
926 | { | |
927 | unsigned long tsk_stk = (unsigned long)current->stack; | |
9c46929e | 928 | #ifdef CONFIG_IRQSTACKS |
370d51c8 | 929 | unsigned long irq_stk = (unsigned long)raw_cpu_read(irq_stack_ptr); |
9c46929e | 930 | #endif |
370d51c8 | 931 | unsigned long ovf_stk = (unsigned long)raw_cpu_read(overflow_stack_ptr); |
a1c510d0 AB |
932 | |
933 | console_verbose(); | |
934 | pr_emerg("Insufficient stack space to handle exception!"); | |
935 | ||
936 | pr_emerg("Task stack: [0x%08lx..0x%08lx]\n", | |
937 | tsk_stk, tsk_stk + THREAD_SIZE); | |
9c46929e | 938 | #ifdef CONFIG_IRQSTACKS |
a1c510d0 AB |
939 | pr_emerg("IRQ stack: [0x%08lx..0x%08lx]\n", |
940 | irq_stk - THREAD_SIZE, irq_stk); | |
9c46929e | 941 | #endif |
a1c510d0 AB |
942 | pr_emerg("Overflow stack: [0x%08lx..0x%08lx]\n", |
943 | ovf_stk - OVERFLOW_STACK_SIZE, ovf_stk); | |
944 | ||
945 | die("kernel stack overflow", regs, 0); | |
946 | } | |
947 | ||
d31e23af | 948 | #ifndef CONFIG_ARM_LPAE |
a1c510d0 AB |
949 | /* |
950 | * Normally, we rely on the logic in do_translation_fault() to update stale PMD | |
951 | * entries covering the vmalloc space in a task's page tables when it first | |
952 | * accesses the region in question. Unfortunately, this is not sufficient when | |
953 | * the task stack resides in the vmalloc region, as do_translation_fault() is a | |
954 | * C function that needs a stack to run. | |
955 | * | |
956 | * So we need to ensure that these PMD entries are up to date *before* the MM | |
957 | * switch. As we already have some logic in the MM switch path that takes care | |
958 | * of this, let's trigger it by bumping the counter every time the core vmalloc | |
d31e23af AB |
959 | * code modifies a PMD entry in the vmalloc region. Use release semantics on |
960 | * the store so that other CPUs observing the counter's new value are | |
961 | * guaranteed to see the updated page table entries as well. | |
a1c510d0 AB |
962 | */ |
/*
 * Hook called by core vmalloc code after it changes PMD-level entries
 * covering [start, end); bump vmalloc_seq so the MM-switch path updates
 * stale PMDs before a vmalloc'ed task stack is used (see comment above).
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
{
	/* Only changes overlapping the vmalloc region can leave stale PMDs. */
	if (start < VMALLOC_END && end > VMALLOC_START)
		/* Release ordering: new page tables visible before the count. */
		atomic_inc_return_release(&init_mm.context.vmalloc_seq);
}
d31e23af | 968 | #endif |
a1c510d0 | 969 | #endif |