// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright 2007-2010 Freescale Semiconductor, Inc.
 *
 * Modified by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras (paulus@samba.org)
 */

/*
 * This file handles the architecture-dependent parts of hardware exceptions
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pkeys.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/extable.h>
#include <linux/module.h>	/* print_modules */
#include <linux/prctl.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/backlight.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/ratelimit.h>
#include <linux/context_tracking.h>
#include <linux/smp.h>
#include <linux/console.h>
#include <linux/kmsg_dump.h>
#include <linux/debugfs.h>

#include <asm/emulated_ops.h>
#include <linux/uaccess.h>
#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
#include <asm/reg.h>
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#include <asm/processor.h>
#endif
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>
#include <asm/rio.h>
#include <asm/fadump.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#include <asm/asm-prototypes.h>
#include <asm/hmi.h>
#include <sysdev/fsl_pci.h>
#include <asm/kprobes.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
#include <asm/disassemble.h>
#include <asm/udbg.h>

#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
int (*__debugger)(struct pt_regs *regs) __read_mostly;
int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;

EXPORT_SYMBOL(__debugger);
EXPORT_SYMBOL(__debugger_ipi);
EXPORT_SYMBOL(__debugger_bpt);
EXPORT_SYMBOL(__debugger_sstep);
EXPORT_SYMBOL(__debugger_iabr_match);
EXPORT_SYMBOL(__debugger_break_match);
EXPORT_SYMBOL(__debugger_fault_handler);
#endif

/* Transactional Memory trap debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while (0)
#endif

static const char *signame(int signr)
{
	switch (signr) {
	case SIGBUS:	return "bus error";
	case SIGFPE:	return "floating point exception";
	case SIGILL:	return "illegal instruction";
	case SIGSEGV:	return "segfault";
	case SIGTRAP:	return "unhandled trap";
	}

	return "unknown signal";
}

/*
 * Trap & Exception support
 */

#ifdef CONFIG_PMAC_BACKLIGHT
static void pmac_backlight_unblank(void)
{
	mutex_lock(&pmac_backlight_mutex);
	if (pmac_backlight) {
		struct backlight_properties *props;

		props = &pmac_backlight->props;
		props->brightness = props->max_brightness;
		props->power = BACKLIGHT_POWER_ON;
		backlight_update_status(pmac_backlight);
	}
	mutex_unlock(&pmac_backlight_mutex);
}
#else
static inline void pmac_backlight_unblank(void) { }
#endif

/*
 * If oops/die is expected to crash the machine, return true here.
 *
 * This should not be expected to be 100% accurate; there may be
 * notifiers registered or other unexpected conditions that may bring
 * down the kernel. Or if the current process in the kernel is holding
 * locks or has other critical state, the kernel may become effectively
 * unusable anyway.
 */
bool die_will_crash(void)
{
	if (should_fadump_crash())
		return true;
	if (kexec_should_crash(current))
		return true;
	if (in_interrupt() || panic_on_oops ||
			!current->pid || is_global_init(current))
		return true;

	return false;
}

static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;
static int die_counter;

void panic_flush_kmsg_start(void)
{
	/*
	 * These are mostly taken from kernel/panic.c, but try to do
	 * relatively minimal work. Don't use delay functions (TB may
	 * be broken), don't crash dump (need to set a firmware log),
	 * don't run notifiers. We do want to get some information to
	 * Linux console.
	 */
	console_verbose();
	bust_spinlocks(1);
}

void panic_flush_kmsg_end(void)
{
	kmsg_dump(KMSG_DUMP_PANIC);
	bust_spinlocks(0);
	debug_locks_off();
	console_flush_on_panic(CONSOLE_FLUSH_PENDING);
}

static unsigned long oops_begin(struct pt_regs *regs)
{
	int cpu;
	unsigned long flags;

	oops_enter();

	/* racy, but better than risking deadlock. */
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!arch_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			arch_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
	console_verbose();
	bust_spinlocks(1);
	if (machine_is(powermac))
		pmac_backlight_unblank();
	return flags;
}
NOKPROBE_SYMBOL(oops_begin);
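
/*
 * A note on the locking above: oops_begin()/oops_end() serialise oops
 * output across CPUs with die_lock. The die_owner check lets a CPU that
 * faults again while already printing its own oops fall through instead
 * of self-deadlocking on die_lock, and die_nest_count tracks that
 * recursion so the lock is only dropped once the outermost oops
 * completes in oops_end().
 */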

static void oops_end(unsigned long flags, struct pt_regs *regs,
		     int signr)
{
	bust_spinlocks(0);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	die_nest_count--;
	oops_exit();
	printk("\n");
	if (!die_nest_count) {
		/* Nest count reaches zero, release the lock. */
		die_owner = -1;
		arch_spin_unlock(&die_lock);
	}
	raw_local_irq_restore(flags);

	/*
	 * system_reset_exception handles debugger, crash dump, panic, for 0x100
	 */
	if (TRAP(regs) == INTERRUPT_SYSTEM_RESET)
		return;

	crash_fadump(regs, "die oops");

	if (kexec_should_crash(current))
		crash_kexec(regs);

	if (!signr)
		return;

	/*
	 * While our oops output is serialised by a spinlock, output
	 * from panic() called below can race and corrupt it. If we
	 * know we are going to panic, delay for 1 second so we have a
	 * chance to get clean backtraces from all CPUs that are oopsing.
	 */
	if (in_interrupt() || panic_on_oops || !current->pid ||
	    is_global_init(current)) {
		mdelay(MSEC_PER_SEC);
	}

	if (panic_on_oops)
		panic("Fatal exception");
	make_task_dead(signr);
}
NOKPROBE_SYMBOL(oops_end);

static char *get_mmu_str(void)
{
	if (early_radix_enabled())
		return " MMU=Radix";
	if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
		return " MMU=Hash";
	return "";
}

static int __die(const char *str, struct pt_regs *regs, long err)
{
	printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);

	printk("%s PAGE_SIZE=%luK%s %s%s%s%s %s\n",
	       IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) ? "LE" : "BE",
	       PAGE_SIZE / 1024, get_mmu_str(),
	       IS_ENABLED(CONFIG_SMP) ? " SMP" : "",
	       IS_ENABLED(CONFIG_SMP) ? (" NR_CPUS=" __stringify(NR_CPUS)) : "",
	       debug_pagealloc_enabled() ? " DEBUG_PAGEALLOC" : "",
	       IS_ENABLED(CONFIG_NUMA) ? " NUMA" : "",
	       ppc_md.name ? ppc_md.name : "");

	if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
		return 1;

	print_modules();
	show_regs(regs);

	return 0;
}
NOKPROBE_SYMBOL(__die);

void die(const char *str, struct pt_regs *regs, long err)
{
	unsigned long flags;

	/*
	 * system_reset_exception handles debugger, crash dump, panic, for 0x100
	 */
	if (TRAP(regs) != INTERRUPT_SYSTEM_RESET) {
		if (debugger(regs))
			return;
	}

	flags = oops_begin(regs);
	if (__die(str, regs, err))
		err = 0;
	oops_end(flags, regs, err);
}
NOKPROBE_SYMBOL(die);

void user_single_step_report(struct pt_regs *regs)
{
	force_sig_fault(SIGTRAP, TRAP_TRACE, (void __user *)regs->nip);
}

static void show_signal_msg(int signr, struct pt_regs *regs, int code,
			    unsigned long addr)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (!show_unhandled_signals)
		return;

	if (!unhandled_signal(current, signr))
		return;

	if (!__ratelimit(&rs))
		return;

	pr_info("%s[%d]: %s (%d) at %lx nip %lx lr %lx code %x",
		current->comm, current->pid, signame(signr), signr,
		addr, regs->nip, regs->link, code);

	print_vma_addr(KERN_CONT " in ", regs->nip);

	pr_cont("\n");

	show_user_instructions(regs);
}

static bool exception_common(int signr, struct pt_regs *regs, int code,
			     unsigned long addr)
{
	if (!user_mode(regs)) {
		die("Exception in kernel mode", regs, signr);
		return false;
	}

	/*
	 * Must not enable interrupts even for user-mode exception, because
	 * this can be called from machine check, which may be an NMI or IRQ
	 * which don't like interrupts being enabled. Could check for
	 * in_hardirq || in_nmi perhaps, but there doesn't seem to be a good
	 * reason why _exception() should enable irqs for an exception handler,
	 * the handlers themselves do that directly.
	 */

	show_signal_msg(signr, regs, code, addr);

	current->thread.trap_nr = code;

	return true;
}

void _exception_pkey(struct pt_regs *regs, unsigned long addr, int key)
{
	if (!exception_common(SIGSEGV, regs, SEGV_PKUERR, addr))
		return;

	force_sig_pkuerr((void __user *) addr, key);
}

void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
{
	if (!exception_common(signr, regs, code, addr))
		return;

	force_sig_fault(signr, code, (void __user *)addr);
}

/*
 * The interrupt architecture has a quirk in that the HV interrupts excluding
 * the NMIs (0x100 and 0x200) do not clear MSR[RI] at entry. The first thing
 * that an interrupt handler must do is save off a GPR into a scratch register,
 * and all interrupts on POWERNV (HV=1) use the HSPRG1 register as scratch.
 * Therefore an NMI can clobber an HV interrupt's live HSPRG1 without noticing
 * that it is non-reentrant, which leads to random data corruption.
 *
 * The solution is for NMI interrupts in HV mode to check if they originated
 * from these critical HV interrupt regions. If so, then mark them not
 * recoverable.
 *
 * An alternative would be for HV NMIs to use SPRG for scratch to avoid the
 * HSPRG1 clobber, however this would cause guest SPRG to be clobbered. Linux
 * guests should always have MSR[RI]=0 when their scratch SPRG is in use, so
 * that would work. However any other guest OS that may have the SPRG live
 * and MSR[RI]=1 could encounter silent corruption.
 *
 * Builds that do not support KVM could take this second option to increase
 * the recoverability of NMIs.
 */
noinstr void hv_nmi_check_nonrecoverable(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_POWERNV
	unsigned long kbase = (unsigned long)_stext;
	unsigned long nip = regs->nip;

	if (!(regs->msr & MSR_RI))
		return;
	if (!(regs->msr & MSR_HV))
		return;
	if (user_mode(regs))
		return;

	/*
	 * Now test if the interrupt has hit a range that may be using
	 * HSPRG1 without having RI=0 (i.e., an HSRR interrupt). The
	 * problem ranges all run un-relocated. Test real and virt modes
	 * at the same time by dropping the high bit of the nip (virt mode
	 * entry points still have the +0x4000 offset).
	 */
	nip &= ~0xc000000000000000ULL;
	if ((nip >= 0x500 && nip < 0x600) || (nip >= 0x4500 && nip < 0x4600))
		goto nonrecoverable;
	if ((nip >= 0x980 && nip < 0xa00) || (nip >= 0x4980 && nip < 0x4a00))
		goto nonrecoverable;
	if ((nip >= 0xe00 && nip < 0xec0) || (nip >= 0x4e00 && nip < 0x4ec0))
		goto nonrecoverable;
	if ((nip >= 0xf80 && nip < 0xfa0) || (nip >= 0x4f80 && nip < 0x4fa0))
		goto nonrecoverable;

	/* Trampoline code runs un-relocated so subtract kbase. */
	if (nip >= (unsigned long)(start_real_trampolines - kbase) &&
	    nip < (unsigned long)(end_real_trampolines - kbase))
		goto nonrecoverable;
	if (nip >= (unsigned long)(start_virt_trampolines - kbase) &&
	    nip < (unsigned long)(end_virt_trampolines - kbase))
		goto nonrecoverable;
	return;

nonrecoverable:
	regs->msr &= ~MSR_RI;
	local_paca->hsrr_valid = 0;
	local_paca->srr_valid = 0;
#endif
}

DEFINE_INTERRUPT_HANDLER_NMI(system_reset_exception)
{
	unsigned long hsrr0, hsrr1;
	bool saved_hsrrs = false;

	/*
	 * System reset can interrupt code where HSRRs are live and MSR[RI]=1.
	 * The system reset interrupt itself may clobber HSRRs (e.g., to call
	 * OPAL), so save them here and restore them before returning.
	 *
	 * Machine checks don't need to save HSRRs, as the real mode handler
	 * is careful to avoid them, and the regular handler is not delivered
	 * as an NMI.
	 */
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		hsrr0 = mfspr(SPRN_HSRR0);
		hsrr1 = mfspr(SPRN_HSRR1);
		saved_hsrrs = true;
	}

	hv_nmi_check_nonrecoverable(regs);

	__this_cpu_inc(irq_stat.sreset_irqs);

	/* See if there are any machine dependent calls */
	if (ppc_md.system_reset_exception) {
		if (ppc_md.system_reset_exception(regs))
			goto out;
	}

	if (debugger(regs))
		goto out;

	kmsg_dump(KMSG_DUMP_OOPS);
	/*
	 * A system reset is a request to dump, so we always send
	 * it through the crashdump code (if fadump or kdump are
	 * registered).
	 */
	crash_fadump(regs, "System Reset");

	crash_kexec(regs);

	/*
	 * We aren't the primary crash CPU. We need to send it
	 * to a holding pattern to avoid it ending up in the panic
	 * code.
	 */
	crash_kexec_secondary(regs);

	/*
	 * No debugger or crash dump registered, print logs then
	 * panic.
	 */
	die("System Reset", regs, SIGABRT);

	mdelay(2*MSEC_PER_SEC); /* Wait a little while for others to print */
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	nmi_panic(regs, "System Reset");

out:
#ifdef CONFIG_PPC_BOOK3S_64
	BUG_ON(get_paca()->in_nmi == 0);
	if (get_paca()->in_nmi > 1)
		die("Unrecoverable nested System Reset", regs, SIGABRT);
#endif
	/* Must die if the interrupt is not recoverable */
	if (regs_is_unrecoverable(regs)) {
		/* For the reason explained in die_mce, nmi_exit before die */
		nmi_exit();
		die("Unrecoverable System Reset", regs, SIGABRT);
	}

	if (saved_hsrrs) {
		mtspr(SPRN_HSRR0, hsrr0);
		mtspr(SPRN_HSRR1, hsrr1);
	}

	/* What should we do here? We could issue a shutdown or hard reset. */

	return 0;
}

/*
 * I/O accesses can cause machine checks on powermacs.
 * Check if the NIP corresponds to the address of a sync
 * instruction for which there is an entry in the exception
 * table.
 * -- paulus.
 */
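/*
 * For reference, a sketch of the accessor sequence being matched (an
 * assumption drawn from the checks in check_io_access() below, not a
 * copy of the actual asm/io.h implementation):
 *
 *	lwz	rD,0(rN)	# the I/O load itself
 *	twi	0,rD,0		# never-taken trap that orders the load
 *	isync			# wait for the access to complete
 *	nop
 *
 * so a machine check taken in that window can be walked back to the
 * preceding load or store for the debug message.
 */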
static inline int check_io_access(struct pt_regs *regs)
{
#ifdef CONFIG_PPC32
	unsigned long msr = regs->msr;
	const struct exception_table_entry *entry;
	unsigned int *nip = (unsigned int *)regs->nip;

	if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
	    && (entry = search_exception_tables(regs->nip)) != NULL) {
		/*
		 * Check that it's a sync instruction, or somewhere
		 * in the twi; isync; nop sequence that inb/inw/inl uses.
		 * As the address is in the exception table
		 * we should be able to read the instr there.
		 * For the debug message, we look at the preceding
		 * load or store.
		 */
		if (*nip == PPC_RAW_NOP())
			nip -= 2;
		else if (*nip == PPC_RAW_ISYNC())
			--nip;
		if (*nip == PPC_RAW_SYNC() || get_op(*nip) == OP_TRAP) {
			unsigned int rb;

			--nip;
			rb = (*nip >> 11) & 0x1f;
			printk(KERN_DEBUG "%s bad port %lx at %p\n",
			       (*nip & 0x100) ? "OUT to" : "IN from",
			       regs->gpr[rb] - _IO_BASE, nip);
			regs_set_recoverable(regs);
			regs_set_return_ip(regs, extable_fixup(entry));
			return 1;
		}
	}
#endif /* CONFIG_PPC32 */
	return 0;
}

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/* On 4xx, the reason for the machine check or program exception
   is in the ESR. */
#define get_reason(regs)	((regs)->esr)
#define REASON_FP		ESR_FP
#define REASON_ILLEGAL		(ESR_PIL | ESR_PUO)
#define REASON_PRIVILEGED	ESR_PPR
#define REASON_TRAP		ESR_PTR
#define REASON_PREFIXED		0
#define REASON_BOUNDARY		0

/* single-step stuff */
#define single_stepping(regs)	(current->thread.debug.dbcr0 & DBCR0_IC)
#define clear_single_step(regs)	(current->thread.debug.dbcr0 &= ~DBCR0_IC)
#define clear_br_trace(regs)	do {} while (0)
#else
/* On non-4xx, the reason for the machine check or program
   exception is in the MSR. */
#define get_reason(regs)	((regs)->msr)
#define REASON_TM		SRR1_PROGTM
#define REASON_FP		SRR1_PROGFPE
#define REASON_ILLEGAL		SRR1_PROGILL
#define REASON_PRIVILEGED	SRR1_PROGPRIV
#define REASON_TRAP		SRR1_PROGTRAP
#define REASON_PREFIXED		SRR1_PREFIXED
#define REASON_BOUNDARY		SRR1_BOUNDARY

#define single_stepping(regs)	((regs)->msr & MSR_SE)
#define clear_single_step(regs)	(regs_set_return_msr((regs), (regs)->msr & ~MSR_SE))
#define clear_br_trace(regs)	(regs_set_return_msr((regs), (regs)->msr & ~MSR_BE))
#endif
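
/*
 * ISA v3.1 prefixed instructions occupy 8 bytes, all others 4. The
 * REASON_PREFIXED flag set by the hardware lets handlers that emulate
 * or skip the faulting instruction advance NIP by the right amount.
 */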
#define inst_length(reason)	(((reason) & REASON_PREFIXED) ? 8 : 4)

#if defined(CONFIG_PPC_E500)
int machine_check_e500mc(struct pt_regs *regs)
{
	unsigned long mcsr = mfspr(SPRN_MCSR);
	unsigned long pvr = mfspr(SPRN_PVR);
	unsigned long reason = mcsr;
	int recoverable = 1;

	if (reason & MCSR_LD) {
		recoverable = fsl_rio_mcheck_exception(regs);
		if (recoverable == 1)
			goto silent_out;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		pr_cont("Machine Check Signal\n");

	if (reason & MCSR_ICPERR) {
		pr_cont("Instruction Cache Parity Error\n");

		/*
		 * This is recoverable by invalidating the i-cache.
		 */
		mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
		while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
			;

		/*
		 * This will generally be accompanied by an instruction
		 * fetch error report -- only treat MCSR_IF as fatal
		 * if it wasn't due to an L1 parity error.
		 */
		reason &= ~MCSR_IF;
	}

	if (reason & MCSR_DCPERR_MC) {
		pr_cont("Data Cache Parity Error\n");

		/*
		 * In write shadow mode we auto-recover from the error, but it
		 * may still get logged and cause a machine check. We should
		 * only treat the non-write shadow case as non-recoverable.
		 */
		/* On e6500 core, L1 DCWS (Data cache write shadow mode) bit
		 * is not implemented but L1 data cache always runs in write
		 * shadow mode. Hence on data cache parity errors HW will
		 * automatically invalidate the L1 Data Cache.
		 */
		if (PVR_VER(pvr) != PVR_VER_E6500) {
			if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
				recoverable = 0;
		}
	}

	if (reason & MCSR_L2MMU_MHIT) {
		pr_cont("Hit on multiple TLB entries\n");
		recoverable = 0;
	}

	if (reason & MCSR_NMI)
		pr_cont("Non-maskable interrupt\n");

	if (reason & MCSR_IF) {
		pr_cont("Instruction Fetch Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LD) {
		pr_cont("Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_ST) {
		pr_cont("Store Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_LDG) {
		pr_cont("Guarded Load Error Report\n");
		recoverable = 0;
	}

	if (reason & MCSR_TLBSYNC)
		pr_cont("Simultaneous tlbsync operations\n");

	if (reason & MCSR_BSL2_ERR) {
		pr_cont("Level 2 Cache Error\n");
		recoverable = 0;
	}

	if (reason & MCSR_MAV) {
		u64 addr;

		addr = mfspr(SPRN_MCAR);
		addr |= (u64)mfspr(SPRN_MCARU) << 32;

		pr_cont("Machine Check %s Address: %#llx\n",
			reason & MCSR_MEA ? "Effective" : "Physical", addr);
	}

silent_out:
	mtspr(SPRN_MCSR, mcsr);
	return mfspr(SPRN_MCSR) == 0 && recoverable;
}

int machine_check_e500(struct pt_regs *regs)
{
	unsigned long reason = mfspr(SPRN_MCSR);

	if (reason & MCSR_BUS_RBERR) {
		if (fsl_rio_mcheck_exception(regs))
			return 1;
		if (fsl_pci_mcheck_exception(regs))
			return 1;
	}

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);

	if (reason & MCSR_MCP)
		pr_cont("Machine Check Signal\n");
	if (reason & MCSR_ICPERR)
		pr_cont("Instruction Cache Parity Error\n");
	if (reason & MCSR_DCP_PERR)
		pr_cont("Data Cache Push Parity Error\n");
	if (reason & MCSR_DCPERR)
		pr_cont("Data Cache Parity Error\n");
	if (reason & MCSR_BUS_IAERR)
		pr_cont("Bus - Instruction Address Error\n");
	if (reason & MCSR_BUS_RAERR)
		pr_cont("Bus - Read Address Error\n");
	if (reason & MCSR_BUS_WAERR)
		pr_cont("Bus - Write Address Error\n");
	if (reason & MCSR_BUS_IBERR)
		pr_cont("Bus - Instruction Data Error\n");
	if (reason & MCSR_BUS_RBERR)
		pr_cont("Bus - Read Data Bus Error\n");
	if (reason & MCSR_BUS_WBERR)
		pr_cont("Bus - Write Data Bus Error\n");
	if (reason & MCSR_BUS_IPERR)
		pr_cont("Bus - Instruction Parity Error\n");
	if (reason & MCSR_BUS_RPERR)
		pr_cont("Bus - Read Parity Error\n");

	return 0;
}

int machine_check_generic(struct pt_regs *regs)
{
	return 0;
}
#elif defined(CONFIG_PPC32)
int machine_check_generic(struct pt_regs *regs)
{
	unsigned long reason = regs->msr;

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from SRR1=%lx): ", reason);
	switch (reason & 0x601F0000) {
	case 0x80000:
		pr_cont("Machine check signal\n");
		break;
	case 0x40000:
	case 0x140000:	/* 7450 MSS error and TEA */
		pr_cont("Transfer error ack signal\n");
		break;
	case 0x20000:
		pr_cont("Data parity error signal\n");
		break;
	case 0x10000:
		pr_cont("Address parity error signal\n");
		break;
	case 0x20000000:
		pr_cont("L1 Data Cache error\n");
		break;
	case 0x40000000:
		pr_cont("L1 Instruction Cache error\n");
		break;
	case 0x00100000:
		pr_cont("L2 data cache parity error\n");
		break;
	default:
		pr_cont("Unknown values in msr\n");
	}
	return 0;
}
#endif /* everything else */

void die_mce(const char *str, struct pt_regs *regs, long err)
{
	/*
	 * The machine check wants to kill the interrupted context,
	 * but make_task_dead() checks for in_interrupt() and panics
	 * in that case, so exit the irq/nmi before calling die.
	 */
	if (in_nmi())
		nmi_exit();
	else
		irq_exit();
	die(str, regs, err);
}

/*
 * BOOK3S_64 does not usually call this handler as a non-maskable interrupt
 * (it uses its own early real-mode handler to handle the MCE proper
 * and then raises irq_work to call this handler when interrupts are
 * enabled). The only time when this is not true is if the early handler
 * is unrecoverable, then it does call this directly to try to get a
 * message out.
 */
static void __machine_check_exception(struct pt_regs *regs)
{
	int recover = 0;

	__this_cpu_inc(irq_stat.mce_exceptions);

	add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);

	/* See if there are any machine dependent calls. In theory, we would
	 * want to call the CPU first, and call the ppc_md. one if the CPU
	 * one returns a positive number. However there is existing code
	 * that assumes the board gets a first chance, so let's keep it
	 * that way for now and fix things later. --BenH.
	 */
	if (ppc_md.machine_check_exception)
		recover = ppc_md.machine_check_exception(regs);
	else if (cur_cpu_spec->machine_check)
		recover = cur_cpu_spec->machine_check(regs);

	if (recover > 0)
		goto bail;

	if (debugger_fault_handler(regs))
		goto bail;

	if (check_io_access(regs))
		goto bail;

	die_mce("Machine check", regs, SIGBUS);

bail:
	/* Must die if the interrupt is not recoverable */
	if (regs_is_unrecoverable(regs))
		die_mce("Unrecoverable Machine check", regs, SIGBUS);
}

#ifdef CONFIG_PPC_BOOK3S_64
DEFINE_INTERRUPT_HANDLER_RAW(machine_check_early_boot)
{
	udbg_printf("Machine check (early boot)\n");
	udbg_printf("SRR0=0x%016lx SRR1=0x%016lx\n", regs->nip, regs->msr);
	udbg_printf(" DAR=0x%016lx DSISR=0x%08lx\n", regs->dar, regs->dsisr);
	udbg_printf("  LR=0x%016lx R1=0x%08lx\n", regs->link, regs->gpr[1]);
	udbg_printf("------\n");
	die("Machine check (early boot)", regs, SIGBUS);
	for (;;)
		;
	return 0;
}

DEFINE_INTERRUPT_HANDLER_ASYNC(machine_check_exception_async)
{
	__machine_check_exception(regs);
}
#endif
DEFINE_INTERRUPT_HANDLER_NMI(machine_check_exception)
{
	__machine_check_exception(regs);

	return 0;
}

DEFINE_INTERRUPT_HANDLER(SMIException) /* async? */
{
	die("System Management Interrupt", regs, SIGABRT);
}

#ifdef CONFIG_VSX
static void p9_hmi_special_emu(struct pt_regs *regs)
{
	unsigned int ra, rb, t, i, sel, instr, rc;
	const void __user *addr;
	u8 vbuf[16] __aligned(16), *vdst;
	unsigned long ea, msr, msr_mask;
	bool swap;

	if (__get_user(instr, (unsigned int __user *)regs->nip))
		return;

	/*
	 * lxvb16x	opcode: 0x7c0006d8
	 * lxvd2x	opcode: 0x7c000698
	 * lxvh8x	opcode: 0x7c000658
	 * lxvw4x	opcode: 0x7c000618
	 */
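	/*
	 * One compare matches all four: the 0xfc00073e mask keeps the
	 * primary and extended opcode bits but clears the two "sel" bits
	 * (0xc0) that distinguish these loads, plus the TX bit (bit 0)
	 * that selects VSRs 32-63; sel and TX are re-extracted from the
	 * instruction further down.
	 */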
	if ((instr & 0xfc00073e) != 0x7c000618) {
		pr_devel("HMI vec emu: not vector CI %i:%s[%d] nip=%016lx"
			 " instr=%08x\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr);
		return;
	}

	/* Grab vector registers into the task struct */
	msr = regs->msr; /* Grab msr before we flush the bits */
	flush_vsx_to_thread(current);
	enable_kernel_altivec();

	/*
	 * Is userspace running with a different endianness? (This is rare
	 * but not impossible.)
	 */
	swap = (msr & MSR_LE) != (MSR_KERNEL & MSR_LE);

	/* Decode the instruction */
	ra = (instr >> 16) & 0x1f;
	rb = (instr >> 11) & 0x1f;
	t = (instr >> 21) & 0x1f;
	if (instr & 1)
		vdst = (u8 *)&current->thread.vr_state.vr[t];
	else
		vdst = (u8 *)&current->thread.fp_state.fpr[t][0];

	/* Grab the vector address */
	ea = regs->gpr[rb] + (ra ? regs->gpr[ra] : 0);
	if (is_32bit_task())
		ea &= 0xfffffffful;
	addr = (__force const void __user *)ea;

	/* Check it */
	if (!access_ok(addr, 16)) {
		pr_devel("HMI vec emu: bad access %i:%s[%d] nip=%016lx"
			 " instr=%08x addr=%016lx\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr, (unsigned long)addr);
		return;
	}

	/* Read the vector */
	rc = 0;
	if ((unsigned long)addr & 0xfUL)
		/* unaligned case */
		rc = __copy_from_user_inatomic(vbuf, addr, 16);
	else
		__get_user_atomic_128_aligned(vbuf, addr, rc);
	if (rc) {
		pr_devel("HMI vec emu: page fault %i:%s[%d] nip=%016lx"
			 " instr=%08x addr=%016lx\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr, (unsigned long)addr);
		return;
	}

	pr_devel("HMI vec emu: emulated vector CI %i:%s[%d] nip=%016lx"
		 " instr=%08x addr=%016lx\n",
		 smp_processor_id(), current->comm, current->pid, regs->nip,
		 instr, (unsigned long) addr);

	/* Grab instruction "selector" */
	sel = (instr >> 6) & 3;

	/*
	 * Check to make sure the facility is actually enabled. This
	 * could happen if we get a false positive hit.
	 *
	 * lxvd2x/lxvw4x always check MSR VSX				sel = 0,2
	 * lxvh8x/lxvb16x check MSR VSX or VEC depending on VSR used	sel = 1,3
	 */
	msr_mask = MSR_VSX;
	if ((sel & 1) && (instr & 1)) /* lxvh8x & lxvb16x + VSR >= 32 */
		msr_mask = MSR_VEC;
	if (!(msr & msr_mask)) {
		pr_devel("HMI vec emu: MSR fac clear %i:%s[%d] nip=%016lx"
			 " instr=%08x msr:%016lx\n",
			 smp_processor_id(), current->comm, current->pid,
			 regs->nip, instr, msr);
		return;
	}

	/* Do logging here before we modify sel based on endian */
	switch (sel) {
	case 0:	/* lxvw4x */
		PPC_WARN_EMULATED(lxvw4x, regs);
		break;
	case 1: /* lxvh8x */
		PPC_WARN_EMULATED(lxvh8x, regs);
		break;
	case 2: /* lxvd2x */
		PPC_WARN_EMULATED(lxvd2x, regs);
		break;
	case 3: /* lxvb16x */
		PPC_WARN_EMULATED(lxvb16x, regs);
		break;
	}

#ifdef __LITTLE_ENDIAN__
	/*
	 * An LE kernel stores the vector in the task struct as an LE
	 * byte array (effectively swapping both the components and
	 * the content of the components). Those instructions expect
	 * the components to remain in ascending address order, so we
	 * swap them back.
	 *
	 * If we are running a BE user space, the expectation is that
	 * of a simple memcpy, so forcing the emulation to look like
	 * a lxvb16x should do the trick.
	 */
	if (swap)
		sel = 3;

	switch (sel) {
	case 0:	/* lxvw4x */
		for (i = 0; i < 4; i++)
			((u32 *)vdst)[i] = ((u32 *)vbuf)[3-i];
		break;
	case 1: /* lxvh8x */
		for (i = 0; i < 8; i++)
			((u16 *)vdst)[i] = ((u16 *)vbuf)[7-i];
		break;
	case 2: /* lxvd2x */
		for (i = 0; i < 2; i++)
			((u64 *)vdst)[i] = ((u64 *)vbuf)[1-i];
		break;
	case 3: /* lxvb16x */
		for (i = 0; i < 16; i++)
			vdst[i] = vbuf[15-i];
		break;
	}
#else /* __LITTLE_ENDIAN__ */
	/* On a big endian kernel, a BE userspace only needs a memcpy */
	if (!swap)
		sel = 3;

	/* Otherwise, we need to swap the content of the components */
	switch (sel) {
	case 0:	/* lxvw4x */
		for (i = 0; i < 4; i++)
			((u32 *)vdst)[i] = cpu_to_le32(((u32 *)vbuf)[i]);
		break;
	case 1: /* lxvh8x */
		for (i = 0; i < 8; i++)
			((u16 *)vdst)[i] = cpu_to_le16(((u16 *)vbuf)[i]);
		break;
	case 2: /* lxvd2x */
		for (i = 0; i < 2; i++)
			((u64 *)vdst)[i] = cpu_to_le64(((u64 *)vbuf)[i]);
		break;
	case 3: /* lxvb16x */
		memcpy(vdst, vbuf, 16);
		break;
	}
#endif /* !__LITTLE_ENDIAN__ */

	/* Go to next instruction */
	regs_add_return_ip(regs, 4);
}
#endif /* CONFIG_VSX */

DEFINE_INTERRUPT_HANDLER_ASYNC(handle_hmi_exception)
{
	struct pt_regs *old_regs;

	old_regs = set_irq_regs(regs);

#ifdef CONFIG_VSX
	/* Real mode flagged P9 special emu is needed */
	if (local_paca->hmi_p9_special_emu) {
		local_paca->hmi_p9_special_emu = 0;

		/*
		 * We don't want to take page faults while doing the
		 * emulation, we just replay the instruction if necessary.
		 */
		pagefault_disable();
		p9_hmi_special_emu(regs);
		pagefault_enable();
	}
#endif /* CONFIG_VSX */

	if (ppc_md.handle_hmi_exception)
		ppc_md.handle_hmi_exception(regs);

	set_irq_regs(old_regs);
}

DEFINE_INTERRUPT_HANDLER(unknown_exception)
{
	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
	       regs->nip, regs->msr, regs->trap);

	_exception(SIGTRAP, regs, TRAP_UNK, 0);
}

DEFINE_INTERRUPT_HANDLER_ASYNC(unknown_async_exception)
{
	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
	       regs->nip, regs->msr, regs->trap);

	_exception(SIGTRAP, regs, TRAP_UNK, 0);
}

DEFINE_INTERRUPT_HANDLER_NMI(unknown_nmi_exception)
{
	printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
	       regs->nip, regs->msr, regs->trap);

	_exception(SIGTRAP, regs, TRAP_UNK, 0);

	return 0;
}

DEFINE_INTERRUPT_HANDLER(instruction_breakpoint_exception)
{
	if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		return;
	if (debugger_iabr_match(regs))
		return;
	_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
}

DEFINE_INTERRUPT_HANDLER(RunModeException)
{
	_exception(SIGTRAP, regs, TRAP_UNK, 0);
}

static void __single_step_exception(struct pt_regs *regs)
{
	clear_single_step(regs);
	clear_br_trace(regs);

	if (kprobe_post_handler(regs))
		return;

	if (notify_die(DIE_SSTEP, "single_step", regs, 5,
					5, SIGTRAP) == NOTIFY_STOP)
		return;
	if (debugger_sstep(regs))
		return;

	_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
}

DEFINE_INTERRUPT_HANDLER(single_step_exception)
{
	__single_step_exception(regs);
}

/*
 * After we have successfully emulated an instruction, we have to
 * check if the instruction was being single-stepped, and if so,
 * pretend we got a single-step exception. This was pointed out
 * by Kumar Gala. -- paulus
 */
void emulate_single_step(struct pt_regs *regs)
{
	if (single_stepping(regs))
		__single_step_exception(regs);
}

#ifdef CONFIG_PPC_FPU_REGS
static inline int __parse_fpscr(unsigned long fpscr)
{
	int ret = FPE_FLTUNK;

	/* Invalid operation */
	if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
		ret = FPE_FLTINV;

	/* Overflow */
	else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
		ret = FPE_FLTOVF;

	/* Underflow */
	else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
		ret = FPE_FLTUND;

	/* Divide by zero */
	else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
		ret = FPE_FLTDIV;

	/* Inexact result */
	else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
		ret = FPE_FLTRES;

	return ret;
}
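
/*
 * Note that each branch above pairs an FPSCR exception enable bit
 * (e.g. FPSCR_VE) with its corresponding status bit (FPSCR_VX): a cause
 * is reported only when the exception is both enabled and asserted,
 * which is the condition under which the hardware raises the interrupt
 * in the first place.
 */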
#endif

static void parse_fpe(struct pt_regs *regs)
{
	int code = 0;

	flush_fp_to_thread(current);

#ifdef CONFIG_PPC_FPU_REGS
	code = __parse_fpscr(current->thread.fp_state.fpscr);
#endif

	_exception(SIGFPE, regs, code, regs->nip);
}

/*
 * Illegal instruction emulation support. Originally written to
 * provide the PVR to user applications using the mfspr rd, PVR.
 * Return non-zero if we can't emulate, or -EFAULT if the associated
 * memory access caused an access fault. Return zero on success.
 *
 * There are a couple of ways to do this, either "decode" the instruction
 * or directly match lots of bits. In this case, matching lots of
 * bits is faster and easier.
 */
static int emulate_string_inst(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 NB_RB = (instword >> 11) & 0x1f;
	u32 num_bytes;
	unsigned long EA;
	int pos = 0;

	/* Early out if we are an invalid form of lswx */
	if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
		if ((rT == rA) || (rT == NB_RB))
			return -EINVAL;

	EA = (rA == 0) ? 0 : regs->gpr[rA];

	switch (instword & PPC_INST_STRING_MASK) {
	case PPC_INST_LSWX:
	case PPC_INST_STSWX:
		EA += NB_RB;
		num_bytes = regs->xer & 0x7f;
		break;
	case PPC_INST_LSWI:
	case PPC_INST_STSWI:
		num_bytes = (NB_RB == 0) ? 32 : NB_RB;
		break;
	default:
		return -EINVAL;
	}

	while (num_bytes != 0)
	{
		u8 val;
		u32 shift = 8 * (3 - (pos & 0x3));

		/* if process is 32-bit, clear upper 32 bits of EA */
		if ((regs->msr & MSR_64BIT) == 0)
			EA &= 0xFFFFFFFF;

		switch ((instword & PPC_INST_STRING_MASK)) {
		case PPC_INST_LSWX:
		case PPC_INST_LSWI:
			if (get_user(val, (u8 __user *)EA))
				return -EFAULT;
			/* first time updating this reg,
			 * zero it out */
			if (pos == 0)
				regs->gpr[rT] = 0;
			regs->gpr[rT] |= val << shift;
			break;
		case PPC_INST_STSWI:
		case PPC_INST_STSWX:
			val = regs->gpr[rT] >> shift;
			if (put_user(val, (u8 __user *)EA))
				return -EFAULT;
			break;
		}
		/* move EA to next address */
		EA += 1;
		num_bytes--;

		/* manage our position within the register */
		if (++pos == 4) {
			pos = 0;
			if (++rT == 32)
				rT = 0;
		}
	}

	return 0;
}
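
/*
 * Worked example of the loop above: "lswi r5,r4,6" loads four bytes
 * from (r4) into r5, most-significant byte of the word first, then the
 * remaining two bytes into the top of r6's word, r6 having been zeroed
 * when its first byte arrived; rT silently wraps from r31 back to r0
 * as the architecture requires.
 */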

static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
{
	u32 ra, rs;
	unsigned long tmp;

	ra = (instword >> 16) & 0x1f;
	rs = (instword >> 21) & 0x1f;

	tmp = regs->gpr[rs];
	tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
	tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
	tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
	regs->gpr[ra] = tmp;

	return 0;
}
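
/*
 * The three masked steps above are the classic SWAR popcount: fold
 * 1-bit fields into 2-bit sums, 2-bit sums into 4-bit sums, then 4-bit
 * sums into one count per byte. Stopping at the byte stage is exactly
 * what popcntb asks for: each byte of RA receives the population count
 * of the corresponding byte of RS, e.g. RS = 0x...ff01 yields
 * RA = 0x...0801.
 */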

static int emulate_isel(struct pt_regs *regs, u32 instword)
{
	u8 rT = (instword >> 21) & 0x1f;
	u8 rA = (instword >> 16) & 0x1f;
	u8 rB = (instword >> 11) & 0x1f;
	u8 BC = (instword >> 6) & 0x1f;
	u8 bit;
	unsigned long tmp;

	tmp = (rA == 0) ? 0 : regs->gpr[rA];
	bit = (regs->ccr >> (31 - BC)) & 0x1;

	regs->gpr[rT] = bit ? tmp : regs->gpr[rB];

	return 0;
}
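
/*
 * That is: rT = (CR bit BC set) ? (rA ? GPR[rA] : 0) : GPR[rB].
 * For example, "isel r3,r4,r5,0" picks r4 when CR0[LT] is set and r5
 * otherwise, letting a compare plus isel replace a conditional branch.
 */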

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline bool tm_abort_check(struct pt_regs *regs, int cause)
{
	/* If we're emulating a load/store in an active transaction, we cannot
	 * emulate it as the kernel operates in transaction suspended context.
	 * We need to abort the transaction. This creates a persistent TM
	 * abort so tell the user what caused it with a new code.
	 */
	if (MSR_TM_TRANSACTIONAL(regs->msr)) {
		tm_enable();
		tm_abort(cause);
		return true;
	}
	return false;
}
#else
static inline bool tm_abort_check(struct pt_regs *regs, int reason)
{
	return false;
}
#endif

static int emulate_instruction(struct pt_regs *regs)
{
	u32 instword;
	u32 rd;

	if (!user_mode(regs))
		return -EINVAL;

	if (get_user(instword, (u32 __user *)(regs->nip)))
		return -EFAULT;

	/* Emulate the mfspr rD, PVR. */
	if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
		PPC_WARN_EMULATED(mfpvr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op. */
	if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
		PPC_WARN_EMULATED(dcba, regs);
		return 0;
	}

	/* Emulate the mcrxr insn. */
	if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
		int shift = (instword >> 21) & 0x1c;
		unsigned long msk = 0xf0000000UL >> shift;

		PPC_WARN_EMULATED(mcrxr, regs);
		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}

	/* Emulate load/store string insn. */
	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
		if (tm_abort_check(regs,
				   TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
			return -EINVAL;
		PPC_WARN_EMULATED(string, regs);
		return emulate_string_inst(regs, instword);
	}

	/* Emulate the popcntb (Population Count Bytes) instruction. */
	if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
		PPC_WARN_EMULATED(popcntb, regs);
		return emulate_popcntb_inst(regs, instword);
	}

	/* Emulate isel (Integer Select) instruction */
	if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
		PPC_WARN_EMULATED(isel, regs);
		return emulate_isel(regs, instword);
	}

	/* Emulate sync instruction variants */
	if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) {
		PPC_WARN_EMULATED(sync, regs);
		asm volatile("sync");
		return 0;
	}

#ifdef CONFIG_PPC64
	/* Emulate the mfspr rD, DSCR. */
	if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
		PPC_INST_MFSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
		PPC_INST_MFSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mfdscr, regs);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_DSCR);
		return 0;
	}
	/* Emulate the mtspr DSCR, rD. */
	if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
		PPC_INST_MTSPR_DSCR_USER) ||
	     ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
		PPC_INST_MTSPR_DSCR)) &&
			cpu_has_feature(CPU_FTR_DSCR)) {
		PPC_WARN_EMULATED(mtdscr, regs);
		rd = (instword >> 21) & 0x1f;
		current->thread.dscr = regs->gpr[rd];
		current->thread.dscr_inherit = 1;
		mtspr(SPRN_DSCR, current->thread.dscr);
		return 0;
	}
#endif

	return -EINVAL;
}

#ifdef CONFIG_GENERIC_BUG
int is_valid_bugaddr(unsigned long addr)
{
	return is_kernel_addr(addr);
}
#endif
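
/*
 * The generic report_bug() in lib/bug.c (called from do_program_check()
 * below) uses is_valid_bugaddr() as its first sanity check, bailing out
 * before searching the bug table when the trapping address cannot
 * plausibly be a BUG()/WARN() site.
 */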

#ifdef CONFIG_MATH_EMULATION
static int emulate_math(struct pt_regs *regs)
{
	int ret;

	ret = do_mathemu(regs);
	if (ret >= 0)
		PPC_WARN_EMULATED(math, regs);

	switch (ret) {
	case 0:
		emulate_single_step(regs);
		return 0;
	case 1: {
		int code = 0;
		code = __parse_fpscr(current->thread.fp_state.fpscr);
		_exception(SIGFPE, regs, code, regs->nip);
		return 0;
	}
	case -EFAULT:
		_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
		return 0;
	}

	return -1;
}
#else
static inline int emulate_math(struct pt_regs *regs) { return -1; }
#endif

static void do_program_check(struct pt_regs *regs)
{
	unsigned int reason = get_reason(regs);

	/* We can now get here via a FP Unavailable exception if the core
	 * has no FPU; in that case the reason flags will be 0. */

	if (reason & REASON_FP) {
		/* IEEE FP exception */
		parse_fpe(regs);
		return;
	}
	if (reason & REASON_TRAP) {
		unsigned long bugaddr;
		/* Debugger is first in line to stop recursive faults in
		 * rcu_lock, notify_die, or atomic_notifier_call_chain */
		if (debugger_bpt(regs))
			return;

		if (kprobe_handler(regs))
			return;

		/* trap exception */
		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
				== NOTIFY_STOP)
			return;

		bugaddr = regs->nip;
		/*
		 * Fixup bugaddr for BUG_ON() in real mode
		 */
		if (!is_kernel_addr(bugaddr) && !(regs->msr & MSR_IR))
			bugaddr += PAGE_OFFSET;

		if (!user_mode(regs) &&
		    report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
			regs_add_return_ip(regs, 4);
			return;
		}

		/* User mode considers other cases after enabling IRQs */
		if (!user_mode(regs)) {
			_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
			return;
		}
	}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (reason & REASON_TM) {
		/* This is a TM "Bad Thing Exception" program check.
		 * This occurs when:
		 * - An rfid/hrfid/mtmsrd attempts to cause an illegal
		 *   transition in TM states.
		 * - A trechkpt is attempted when transactional.
		 * - A treclaim is attempted when non transactional.
		 * - A tend is illegally attempted.
		 * - writing a TM SPR when transactional.
		 *
		 * If usermode caused this, it's done something illegal and
		 * gets a SIGILL slap on the wrist. We call it an illegal
		 * operand to distinguish from the instruction just being bad
		 * (e.g. executing a 'tend' on a CPU without TM!); it's an
		 * illegal /placement/ of a valid instruction.
		 */
		if (user_mode(regs)) {
			_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
			return;
		} else {
			printk(KERN_EMERG "Unexpected TM Bad Thing exception "
			       "at %lx (msr 0x%lx) tm_scratch=%llx\n",
			       regs->nip, regs->msr, get_paca()->tm_scratch);
			die("Unrecoverable exception", regs, SIGABRT);
		}
	}
#endif

	/*
	 * If we took the program check in the kernel, skip down to sending a
	 * SIGILL. The subsequent cases all relate to user space, such as
	 * emulating instructions which we should only do for user space. We
	 * also do not want to enable interrupts for kernel faults because that
	 * might lead to further faults, and lose the context of the original
	 * exception.
	 */
	if (!user_mode(regs))
		goto sigill;

	interrupt_cond_local_irq_enable(regs);

	/*
	 * (reason & REASON_TRAP) is mostly handled before enabling IRQs,
	 * except get_user_instr() can sleep so we cannot reliably inspect the
	 * current instruction in that context. Now that we know we are
	 * handling a user space trap and can sleep, we can check if the trap
	 * was a hashchk failure.
	 */
	if (reason & REASON_TRAP) {
		if (cpu_has_feature(CPU_FTR_DEXCR_NPHIE)) {
			ppc_inst_t insn;

			if (get_user_instr(insn, (void __user *)regs->nip)) {
				_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
				return;
			}

			if (ppc_inst_primary_opcode(insn) == 31 &&
			    get_xop(ppc_inst_val(insn)) == OP_31_XOP_HASHCHK) {
				_exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
				return;
			}
		}

		_exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
		return;
	}

	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
	 * but there seems to be a hardware bug on the 405GP (RevD)
	 * that means ESR is sometimes set incorrectly - either to
	 * ESR_DST (!?) or 0. In the process of chasing this with the
	 * hardware people - not sure if it can happen on any illegal
	 * instruction or only on FP instructions, whether there is a
	 * pattern to occurrences etc. -dgibson 31/Mar/2003
	 */
	if (!emulate_math(regs))
		return;

	/* Try to emulate it if we should. */
	if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
		switch (emulate_instruction(regs)) {
		case 0:
			regs_add_return_ip(regs, 4);
			emulate_single_step(regs);
			return;
		case -EFAULT:
			_exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
			return;
		}
	}

sigill:
	if (reason & REASON_PRIVILEGED)
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
	else
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}

DEFINE_INTERRUPT_HANDLER(program_check_exception)
{
	do_program_check(regs);
}

/*
 * This occurs when running in hypervisor mode on POWER6 or later
 * and an illegal instruction is encountered.
 */
DEFINE_INTERRUPT_HANDLER(emulation_assist_interrupt)
{
	regs_set_return_msr(regs, regs->msr | REASON_ILLEGAL);
	do_program_check(regs);
}

DEFINE_INTERRUPT_HANDLER(alignment_exception)
{
	int sig, code, fixed = 0;
	unsigned long reason;

	interrupt_cond_local_irq_enable(regs);

	reason = get_reason(regs);
	if (reason & REASON_BOUNDARY) {
		sig = SIGBUS;
		code = BUS_ADRALN;
		goto bad;
	}

	if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
		return;

	/* we don't implement logging of alignment exceptions */
	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		fixed = fix_alignment(regs);

	if (fixed == 1) {
		/* skip over emulated instruction */
		regs_add_return_ip(regs, inst_length(reason));
		emulate_single_step(regs);
		return;
	}

	/* Operand address was bad */
	if (fixed == -EFAULT) {
		sig = SIGSEGV;
		code = SEGV_ACCERR;
	} else {
		sig = SIGBUS;
		code = BUS_ADRALN;
	}
bad:
	if (user_mode(regs))
		_exception(sig, regs, code, regs->dar);
	else
		bad_page_fault(regs, sig);
}

DEFINE_INTERRUPT_HANDLER(stack_overflow_exception)
{
	die("Kernel stack overflow", regs, SIGSEGV);
}

DEFINE_INTERRUPT_HANDLER(kernel_fp_unavailable_exception)
{
	printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
			  "%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
}

DEFINE_INTERRUPT_HANDLER(altivec_unavailable_exception)
{
	if (user_mode(regs)) {
		/* A user program has executed an AltiVec instruction,
		   but this kernel doesn't support AltiVec. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
}

DEFINE_INTERRUPT_HANDLER(vsx_unavailable_exception)
{
	if (user_mode(regs)) {
		/* A user program has executed a VSX instruction,
		   but this kernel doesn't support VSX. */
		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
		return;
	}

	printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}

#ifdef CONFIG_PPC_BOOK3S_64
static void tm_unavailable(struct pt_regs *regs)
{
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (user_mode(regs)) {
		current->thread.load_tm++;
		regs_set_return_msr(regs, regs->msr | MSR_TM);
		tm_enable();
		tm_restore_sprs(&current->thread);
		return;
	}
#endif
	pr_emerg("Unrecoverable TM Unavailable Exception "
			"%lx at %lx\n", regs->trap, regs->nip);
	die("Unrecoverable TM Unavailable Exception", regs, SIGABRT);
}
1739
DEFINE_INTERRUPT_HANDLER(facility_unavailable_exception)
{
	static char *facility_strings[] = {
		[FSCR_FP_LG] = "FPU",
		[FSCR_VECVSX_LG] = "VMX/VSX",
		[FSCR_DSCR_LG] = "DSCR",
		[FSCR_PM_LG] = "PMU SPRs",
		[FSCR_BHRB_LG] = "BHRB",
		[FSCR_TM_LG] = "TM",
		[FSCR_EBB_LG] = "EBB",
		[FSCR_TAR_LG] = "TAR",
		[FSCR_MSGP_LG] = "MSGP",
		[FSCR_SCV_LG] = "SCV",
		[FSCR_PREFIX_LG] = "PREFIX",
	};
	char *facility = "unknown";
	u64 value;
	u32 instword, rd;
	u8 status;
	bool hv;

	hv = (TRAP(regs) == INTERRUPT_H_FAC_UNAVAIL);
	if (hv)
		value = mfspr(SPRN_HFSCR);
	else
		value = mfspr(SPRN_FSCR);

	status = value >> 56;
	if ((hv || status >= 2) &&
	    (status < ARRAY_SIZE(facility_strings)) &&
	    facility_strings[status])
		facility = facility_strings[status];

	/* We should not have taken this interrupt in kernel */
	if (!user_mode(regs)) {
		pr_emerg("Facility '%s' unavailable (%d) exception in kernel mode at %lx\n",
			 facility, status, regs->nip);
		die("Unexpected facility unavailable exception", regs, SIGABRT);
	}

	interrupt_cond_local_irq_enable(regs);

	if (status == FSCR_DSCR_LG) {
		/*
		 * The user is accessing the DSCR through the problem-state
		 * SPR number (0x03), via either an mfspr or an mtspr
		 * instruction. On a write (mtspr) we set the inherit bit
		 * and also set the FSCR DSCR bit, which lets the user read
		 * and write the register directly from then on. On a read
		 * (mfspr) we just emulate the instruction instead. This
		 * path therefore keeps emulating mfspr until the user has
		 * attempted at least one mtspr, which matches the behaviour
		 * of DSCR accesses through the privileged SPR number (0x11),
		 * emulated via the illegal-instruction exception. We always
		 * leave HFSCR DSCR set.
		 */
		if (get_user(instword, (u32 __user *)(regs->nip))) {
			pr_err("Failed to fetch the user instruction\n");
			return;
		}

		/* Write into DSCR (mtspr 0x03, RS) */
		if ((instword & PPC_INST_MTSPR_DSCR_USER_MASK)
				== PPC_INST_MTSPR_DSCR_USER) {
			rd = (instword >> 21) & 0x1f;
			current->thread.dscr = regs->gpr[rd];
			current->thread.dscr_inherit = 1;
			current->thread.fscr |= FSCR_DSCR;
			mtspr(SPRN_FSCR, current->thread.fscr);
		}

		/* Read from DSCR (mfspr RT, 0x03) */
		if ((instword & PPC_INST_MFSPR_DSCR_USER_MASK)
				== PPC_INST_MFSPR_DSCR_USER) {
			if (emulate_instruction(regs)) {
				pr_err("DSCR based mfspr emulation failed\n");
				return;
			}
			regs_add_return_ip(regs, 4);
			emulate_single_step(regs);
		}
		return;
	}

	if (status == FSCR_TM_LG) {
		/*
		 * If we're here then the hardware is TM aware because it
		 * generated an exception with FSCR_TM set.
		 *
		 * If cpu_has_feature(CPU_FTR_TM) is false, then either firmware
		 * told us not to do TM, or the kernel is not built with TM
		 * support.
		 *
		 * If both of those things are true, then userspace can spam the
		 * console by triggering the printk() below just by continually
		 * doing tbegin (or any TM instruction). So in that case just
		 * send the process a SIGILL immediately.
		 */
		if (!cpu_has_feature(CPU_FTR_TM))
			goto out;

		tm_unavailable(regs);
		return;
	}

	pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n",
			   hv ? "Hypervisor " : "", facility, status, regs->nip, regs->msr);

out:
	_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
}
#endif

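/*
 * For reference, a sketch of the decode used in the DSCR path above. In
 * the mfspr/mtspr encodings the GPR operand (RT/RS) sits in bits 21-25
 * of the instruction word, and the SPR number is split into two swapped
 * 5-bit halves in bits 11-20; PPC_INST_M[TF]SPR_DSCR_USER and the
 * matching masks (from asm/ppc-opcode.h) pin the SPR field to the
 * problem-state DSCR number (0x03). mspr_gpr_operand() is a
 * hypothetical helper name.
 */
static inline u32 __maybe_unused mspr_gpr_operand(u32 instword)
{
	return (instword >> 21) & 0x1f;	/* RS for mtspr, RT for mfspr */
}
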
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM

DEFINE_INTERRUPT_HANDLER(fp_unavailable_tm)
{
	/* Note: This does not handle any kind of FP laziness. */

	TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
		 regs->nip, regs->msr);

	/* We can only have got here if the task started using FP after
	 * beginning the transaction.  So, the transactional regs are just a
	 * copy of the checkpointed ones.  But, we still need to recheckpoint
	 * as we're enabling FP for the process; it will return, abort the
	 * transaction, and probably retry but now with FP enabled.  So the
	 * checkpointed FP registers need to be loaded.
	 */
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);

	/*
	 * Reclaim initially saves the (bogus, lazy) FPRs to ckfp_state,
	 * which tm_reclaim_thread() then overwrites with thr->fp_state.
	 *
	 * At this point, ck{fp,vr}_state contains the exact values we want to
	 * recheckpoint.
	 */

	/* Enable FP for the task: */
	current->thread.load_fp = 1;

	/*
	 * Recheckpoint all the checkpointed ckpt, ck{fp, vr}_state registers.
	 */
	tm_recheckpoint(&current->thread);
}

DEFINE_INTERRUPT_HANDLER(altivec_unavailable_tm)
{
	/* See the comments in fp_unavailable_tm().  This function operates
	 * the same way.
	 */

	TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
		 regs->nip, regs->msr);
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);
	current->thread.load_vec = 1;
	tm_recheckpoint(&current->thread);
	current->thread.used_vr = 1;
}

DEFINE_INTERRUPT_HANDLER(vsx_unavailable_tm)
{
	/* See the comments in fp_unavailable_tm().  This works similarly,
	 * though we're loading both FP and VEC registers in here.
	 *
	 * If FP isn't in use, load FP regs.  If VEC isn't in use, load VEC
	 * regs.  Either way, set MSR_VSX.
	 */

	TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
		 regs->nip, regs->msr);

	current->thread.used_vsr = 1;

	/* This reclaims FP and/or VR regs if they're already enabled */
	tm_reclaim_current(TM_CAUSE_FAC_UNAV);

	current->thread.load_vec = 1;
	current->thread.load_fp = 1;

	tm_recheckpoint(&current->thread);
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

#ifdef CONFIG_PPC64
DECLARE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi);
DEFINE_INTERRUPT_HANDLER_NMI(performance_monitor_exception_nmi)
{
	__this_cpu_inc(irq_stat.pmu_irqs);

	perf_irq(regs);

	return 0;
}
#endif

DECLARE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async);
DEFINE_INTERRUPT_HANDLER_ASYNC(performance_monitor_exception_async)
{
	__this_cpu_inc(irq_stat.pmu_irqs);

	perf_irq(regs);
}

DEFINE_INTERRUPT_HANDLER_RAW(performance_monitor_exception)
{
	/*
	 * On 64-bit, if perf interrupts hit in a local_irq_disable
	 * (soft-masked) region, we consider them as NMIs. This is required to
	 * prevent hash faults on user addresses when reading callchains (and
	 * looks better from an irq tracing perspective).
	 */
	if (IS_ENABLED(CONFIG_PPC64) && unlikely(arch_irq_disabled_regs(regs)))
		performance_monitor_exception_nmi(regs);
	else
		performance_monitor_exception_async(regs);

	return 0;
}

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
{
	int changed = 0;
	/*
	 * Determine the cause of the debug event, clear the
	 * event flags and send a trap to the handler.
	 */
	if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
		dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
#ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
		current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
#endif
		do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, 5);
		changed |= 0x01;
	} else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
		dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
		do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, 6);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC1) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
		dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
		do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, 1);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC2) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
		do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, 2);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC3) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
		dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
		do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, 3);
		changed |= 0x01;
	} else if (debug_status & DBSR_IAC4) {
		current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
		do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, 4);
		changed |= 0x01;
	}
	/*
	 * At the point this routine was called, the MSR(DE) was turned off.
	 * Check all other debug flags and see if that bit needs to be turned
	 * back on or not.
	 */
	if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
			       current->thread.debug.dbcr1))
		regs_set_return_msr(regs, regs->msr | MSR_DE);
	else
		/* Make sure the IDM flag is off */
		current->thread.debug.dbcr0 &= ~DBCR0_IDM;

	if (changed & 0x01)
		mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
}

DEFINE_INTERRUPT_HANDLER(DebugException)
{
	unsigned long debug_status = regs->dsisr;

	current->thread.debug.dbsr = debug_status;

	/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
	 * on server, it stops on the target of the branch. In order to simulate
	 * the server behaviour, we thus restart right away with a single step
	 * instead of stopping here when hitting a BT.
	 */
	if (debug_status & DBSR_BT) {
		regs_set_return_msr(regs, regs->msr & ~MSR_DE);

		/* Disable BT */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
		/* Clear the BT event */
		mtspr(SPRN_DBSR, DBSR_BT);

		/* Do the single step trick only when coming from userspace */
		if (user_mode(regs)) {
			current->thread.debug.dbcr0 &= ~DBCR0_BT;
			current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
			regs_set_return_msr(regs, regs->msr | MSR_DE);
			return;
		}

		if (kprobe_post_handler(regs))
			return;

		if (notify_die(DIE_SSTEP, "block_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}
		if (debugger_sstep(regs))
			return;
	} else if (debug_status & DBSR_IC) {	/* Instruction complete */
		regs_set_return_msr(regs, regs->msr & ~MSR_DE);

		/* Disable instruction completion */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
		/* Clear the instruction completion event */
		mtspr(SPRN_DBSR, DBSR_IC);

		if (kprobe_post_handler(regs))
			return;

		if (notify_die(DIE_SSTEP, "single_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}

		if (debugger_sstep(regs))
			return;

		if (user_mode(regs)) {
			current->thread.debug.dbcr0 &= ~DBCR0_IC;
			if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
					       current->thread.debug.dbcr1))
				regs_set_return_msr(regs, regs->msr | MSR_DE);
			else
				/* Make sure the IDM bit is off */
				current->thread.debug.dbcr0 &= ~DBCR0_IDM;
		}

		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
	} else
		handle_debug(regs, debug_status);
}
#endif /* CONFIG_PPC_ADV_DEBUG_REGS */

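/*
 * A minimal sketch (hypothetical helper, not part of the original file)
 * of what "arming" BookE single-step looks like from the debugger side:
 * DebugException above expects DBCR0[IDM,IC] set in the thread's shadow
 * dbcr0 and MSR[DE] set in the task's saved MSR, which is exactly what
 * its own branch-taken emulation path sets up.
 */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static void __maybe_unused sketch_arm_single_step(struct task_struct *task,
						  struct pt_regs *regs)
{
	/* Internal debug mode + instruction-complete event */
	task->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
	/* Take debug interrupts on return to the task */
	regs_set_return_msr(regs, regs->msr | MSR_DE);
}
#endif
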
#ifdef CONFIG_ALTIVEC
DEFINE_INTERRUPT_HANDLER(altivec_assist_exception)
{
	int err;

	if (!user_mode(regs)) {
		printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
		       " at %lx\n", regs->nip);
		die("Kernel VMX/Altivec assist exception", regs, SIGILL);
	}

	flush_altivec_to_thread(current);

	PPC_WARN_EMULATED(altivec, regs);
	err = emulate_altivec(regs);
	if (err == 0) {
		regs_add_return_ip(regs, 4); /* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else {
		/* didn't recognize the instruction */
		/* XXX quick hack for now: set the non-Java bit in the VSCR */
		printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
				   "in %s at %lx\n", current->comm, regs->nip);
		current->thread.vr_state.vscr.u[3] |= 0x10000;
	}
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_PPC_85xx
DEFINE_INTERRUPT_HANDLER(CacheLockingException)
{
	unsigned long error_code = regs->dsisr;

	/* We treat cache locking instructions from the user
	 * as priv ops, in the future we could try to do
	 * something smarter
	 */
	if (error_code & (ESR_DLK|ESR_ILK))
		_exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
}
#endif /* CONFIG_PPC_85xx */

#ifdef CONFIG_SPE
DEFINE_INTERRUPT_HANDLER(SPEFloatingPointException)
{
	unsigned long spefscr;
	int fpexc_mode;
	int code = FPE_FLTUNK;
	int err;

	interrupt_cond_local_irq_enable(regs);

	flush_spe_to_thread(current);

	spefscr = current->thread.spefscr;
	fpexc_mode = current->thread.fpexc_mode;

	if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF))
		code = FPE_FLTOVF;
	else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND))
		code = FPE_FLTUND;
	else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
		code = FPE_FLTDIV;
	else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV))
		code = FPE_FLTINV;
	else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
		code = FPE_FLTRES;

	err = do_spe_mathemu(regs);
	if (err == 0) {
		regs_add_return_ip(regs, 4); /* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, code, regs->nip);
	}
}

DEFINE_INTERRUPT_HANDLER(SPEFloatingPointRoundException)
{
	int err;

	interrupt_cond_local_irq_enable(regs);

	preempt_disable();
	if (regs->msr & MSR_SPE)
		giveup_spe(current);
	preempt_enable();

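	/*
	 * The rounding exception is delivered after the offending
	 * instruction has completed, so NIP already points past it;
	 * back up one instruction so speround_handler() can decode it.
	 */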
	regs_add_return_ip(regs, -4);
	err = speround_handler(regs);
	if (err == 0) {
		regs_add_return_ip(regs, 4); /* skip emulated instruction */
		emulate_single_step(regs);
		return;
	}

	if (err == -EFAULT) {
		/* got an error reading the instruction */
		_exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
	} else if (err == -EINVAL) {
		/* didn't recognize the instruction */
		printk(KERN_ERR "unrecognized spe instruction "
		       "in %s at %lx\n", current->comm, regs->nip);
	} else {
		_exception(SIGFPE, regs, FPE_FLTUNK, regs->nip);
	}
}
#endif

/*
 * We enter here if we get an unrecoverable exception, that is, one
 * that happened at a point where the RI (recoverable interrupt) bit
 * in the MSR is 0.  This indicates that SRR0/1 are live, and that
 * we therefore lost state by taking this exception.
 */
void __noreturn unrecoverable_exception(struct pt_regs *regs)
{
	pr_emerg("Unrecoverable exception %lx at %lx (msr=%lx)\n",
		 regs->trap, regs->nip, regs->msr);
	die("Unrecoverable exception", regs, SIGABRT);
	/* die() should not return */
	for (;;)
		;
}

#ifdef CONFIG_BOOKE_WDT
DEFINE_INTERRUPT_HANDLER_NMI(WatchdogException)
{
	printk(KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
	mtspr(SPRN_TCR, mfspr(SPRN_TCR) & ~TCR_WIE);
	return 0;
}
#endif

/*
 * We enter here if we discover during exception entry that we are
 * running in supervisor mode with a userspace value in the stack pointer.
 */
DEFINE_INTERRUPT_HANDLER(kernel_bad_stack)
{
	printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
	       regs->gpr[1], regs->nip);
	die("Bad kernel stack pointer", regs, SIGABRT);
}

#ifdef CONFIG_PPC_EMULATED_STATS

#define WARN_EMULATED_SETUP(type)	.type = { .name = #type }

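/*
 * For example, WARN_EMULATED_SETUP(isel) expands to
 *
 *	.isel = { .name = "isel" },
 *
 * so each counter carries the name used for its debugfs file below.
 */
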
struct ppc_emulated ppc_emulated = {
#ifdef CONFIG_ALTIVEC
	WARN_EMULATED_SETUP(altivec),
#endif
	WARN_EMULATED_SETUP(dcba),
	WARN_EMULATED_SETUP(dcbz),
	WARN_EMULATED_SETUP(fp_pair),
	WARN_EMULATED_SETUP(isel),
	WARN_EMULATED_SETUP(mcrxr),
	WARN_EMULATED_SETUP(mfpvr),
	WARN_EMULATED_SETUP(multiple),
	WARN_EMULATED_SETUP(popcntb),
	WARN_EMULATED_SETUP(spe),
	WARN_EMULATED_SETUP(string),
	WARN_EMULATED_SETUP(sync),
	WARN_EMULATED_SETUP(unaligned),
#ifdef CONFIG_MATH_EMULATION
	WARN_EMULATED_SETUP(math),
#endif
#ifdef CONFIG_VSX
	WARN_EMULATED_SETUP(vsx),
#endif
#ifdef CONFIG_PPC64
	WARN_EMULATED_SETUP(mfdscr),
	WARN_EMULATED_SETUP(mtdscr),
	WARN_EMULATED_SETUP(lq_stq),
	WARN_EMULATED_SETUP(lxvw4x),
	WARN_EMULATED_SETUP(lxvh8x),
	WARN_EMULATED_SETUP(lxvd2x),
	WARN_EMULATED_SETUP(lxvb16x),
#endif
};

u32 ppc_warn_emulated;

void ppc_warn_emulated_print(const char *type)
{
	pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
			    type);
}

static int __init ppc_warn_emulated_init(void)
{
	struct dentry *dir;
	unsigned int i;
	struct ppc_emulated_entry *entries = (void *)&ppc_emulated;

	dir = debugfs_create_dir("emulated_instructions",
				 arch_debugfs_dir);

	debugfs_create_u32("do_warn", 0644, dir, &ppc_warn_emulated);

	for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++)
		debugfs_create_u32(entries[i].name, 0644, dir,
				   (u32 *)&entries[i].val.counter);

	return 0;
}

device_initcall(ppc_warn_emulated_init);

#endif /* CONFIG_PPC_EMULATED_STATS */
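
/*
 * Example: with CONFIG_PPC_EMULATED_STATS enabled and debugfs mounted in
 * the usual place, the files created above should appear as
 *
 *	/sys/kernel/debug/powerpc/emulated_instructions/do_warn
 *	/sys/kernel/debug/powerpc/emulated_instructions/isel
 *	...
 *
 * Each per-instruction file exposes its emulation count; writing a
 * non-zero value to do_warn additionally enables the rate-limited
 * message printed by ppc_warn_emulated_print().
 */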