2 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
3 * Copyright 2007-2010 Freescale Semiconductor, Inc.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
10 * Modified by Cort Dougan (cort@cs.nmt.edu)
11 * and Paul Mackerras (paulus@samba.org)
15 * This file handles the architecture-dependent parts of hardware exceptions
18 #include <linux/errno.h>
19 #include <linux/sched.h>
20 #include <linux/sched/debug.h>
21 #include <linux/kernel.h>
23 #include <linux/stddef.h>
24 #include <linux/unistd.h>
25 #include <linux/ptrace.h>
26 #include <linux/user.h>
27 #include <linux/interrupt.h>
28 #include <linux/init.h>
29 #include <linux/extable.h>
30 #include <linux/module.h> /* print_modules */
31 #include <linux/prctl.h>
32 #include <linux/delay.h>
33 #include <linux/kprobes.h>
34 #include <linux/kexec.h>
35 #include <linux/backlight.h>
36 #include <linux/bug.h>
37 #include <linux/kdebug.h>
38 #include <linux/ratelimit.h>
39 #include <linux/context_tracking.h>
41 #include <asm/emulated_ops.h>
42 #include <asm/pgtable.h>
43 #include <linux/uaccess.h>
44 #include <asm/debugfs.h>
46 #include <asm/machdep.h>
50 #ifdef CONFIG_PMAC_BACKLIGHT
51 #include <asm/backlight.h>
54 #include <asm/firmware.h>
55 #include <asm/processor.h>
58 #include <asm/kexec.h>
59 #include <asm/ppc-opcode.h>
61 #include <asm/fadump.h>
62 #include <asm/switch_to.h>
64 #include <asm/debug.h>
65 #include <asm/asm-prototypes.h>
67 #include <sysdev/fsl_pci.h>
68 #include <asm/kprobes.h>
70 #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
71 int (*__debugger)(struct pt_regs *regs) __read_mostly;
72 int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
73 int (*__debugger_bpt)(struct pt_regs *regs) __read_mostly;
74 int (*__debugger_sstep)(struct pt_regs *regs) __read_mostly;
75 int (*__debugger_iabr_match)(struct pt_regs *regs) __read_mostly;
76 int (*__debugger_break_match)(struct pt_regs *regs) __read_mostly;
77 int (*__debugger_fault_handler)(struct pt_regs *regs) __read_mostly;
79 EXPORT_SYMBOL(__debugger);
80 EXPORT_SYMBOL(__debugger_ipi);
81 EXPORT_SYMBOL(__debugger_bpt);
82 EXPORT_SYMBOL(__debugger_sstep);
83 EXPORT_SYMBOL(__debugger_iabr_match);
84 EXPORT_SYMBOL(__debugger_break_match);
85 EXPORT_SYMBOL(__debugger_fault_handler);
88 /* Transactional Memory trap debug */
90 #define TM_DEBUG(x...) printk(KERN_INFO x)
92 #define TM_DEBUG(x...) do { } while(0)
96 * Trap & Exception support
99 #ifdef CONFIG_PMAC_BACKLIGHT
100 static void pmac_backlight_unblank(void)
102 mutex_lock(&pmac_backlight_mutex);
103 if (pmac_backlight) {
104 struct backlight_properties *props;
106 props = &pmac_backlight->props;
107 props->brightness = props->max_brightness;
108 props->power = FB_BLANK_UNBLANK;
109 backlight_update_status(pmac_backlight);
111 mutex_unlock(&pmac_backlight_mutex);
114 static inline void pmac_backlight_unblank(void) { }
117 static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
118 static int die_owner = -1;
119 static unsigned int die_nest_count;
120 static int die_counter;
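/*
 * die_lock serialises oops output across CPUs; die_owner and
 * die_nest_count let the owning CPU re-enter for a nested oops
 * without deadlocking on the lock.
 */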
122 static unsigned long oops_begin(struct pt_regs *regs)
129 /* racy, but better than risking deadlock. */
130 raw_local_irq_save(flags);
131 cpu = smp_processor_id();
132 if (!arch_spin_trylock(&die_lock)) {
133 if (cpu == die_owner)
134 /* nested oops. should stop eventually */;
136 arch_spin_lock(&die_lock);
142 if (machine_is(powermac))
143 pmac_backlight_unblank();
146 NOKPROBE_SYMBOL(oops_begin);
148 static void oops_end(unsigned long flags, struct pt_regs *regs,
152 add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
156 if (!die_nest_count) {
157 /* Nest count reaches zero, release the lock. */
159 arch_spin_unlock(&die_lock);
161 raw_local_irq_restore(flags);
163 crash_fadump(regs, "die oops");
166 * A system reset (0x100) is a request to dump, so we always send
167 * it through the crashdump code.
169 if (kexec_should_crash(current) || (TRAP(regs) == 0x100)) {
173 * We aren't the primary crash CPU. We need to send it
174 * to a holding pattern to avoid it ending up in the panic
177 crash_kexec_secondary(regs);
184 * While our oops output is serialised by a spinlock, output
185 * from panic() called below can race and corrupt it. If we
186 * know we are going to panic, delay for 1 second so we have a
187 * chance to get clean backtraces from all CPUs that are oopsing.
189 if (in_interrupt() || panic_on_oops || !current->pid ||
190 is_global_init(current)) {
191 mdelay(MSEC_PER_SEC);
195 panic("Fatal exception in interrupt");
197 panic("Fatal exception");
200 NOKPROBE_SYMBOL(oops_end);
202 static int __die(const char *str, struct pt_regs *regs, long err)
204 printk("Oops: %s, sig: %ld [#%d]\n", str, err, ++die_counter);
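/* Summarise the kernel configuration that is relevant to this oops */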
206 if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
211 if (IS_ENABLED(CONFIG_PREEMPT))
214 if (IS_ENABLED(CONFIG_SMP))
215 pr_cont("SMP NR_CPUS=%d ", NR_CPUS);
217 if (debug_pagealloc_enabled())
218 pr_cont("DEBUG_PAGEALLOC ");
220 if (IS_ENABLED(CONFIG_NUMA))
223 pr_cont("%s\n", ppc_md.name ? ppc_md.name : "");
225 if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV) == NOTIFY_STOP)
233 NOKPROBE_SYMBOL(__die);
235 void die(const char *str, struct pt_regs *regs, long err)
242 flags = oops_begin(regs);
243 if (__die(str, regs, err))
245 oops_end(flags, regs, err);
247 NOKPROBE_SYMBOL(die);
249 void user_single_step_siginfo(struct task_struct *tsk,
250 struct pt_regs *regs, siginfo_t *info)
252 memset(info, 0, sizeof(*info));
253 info->si_signo = SIGTRAP;
254 info->si_code = TRAP_TRACE;
255 info->si_addr = (void __user *)regs->nip;
258 void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
261 const char fmt32[] = KERN_INFO "%s[%d]: unhandled signal %d " \
262 "at %08lx nip %08lx lr %08lx code %x\n";
263 const char fmt64[] = KERN_INFO "%s[%d]: unhandled signal %d " \
264 "at %016lx nip %016lx lr %016lx code %x\n";
266 if (!user_mode(regs)) {
267 die("Exception in kernel mode", regs, signr);
271 if (show_unhandled_signals && unhandled_signal(current, signr)) {
272 printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
273 current->comm, current->pid, signr,
274 addr, regs->nip, regs->link, code);
277 if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
280 current->thread.trap_nr = code;
281 memset(&info, 0, sizeof(info));
282 info.si_signo = signr;
284 info.si_addr = (void __user *) addr;
285 force_sig_info(signr, &info, current);
288 void system_reset_exception(struct pt_regs *regs)
291 * Avoid crashes in case of nested NMI exceptions. Recoverability
292 * is determined by RI and in_nmi
294 bool nested = in_nmi();
298 __this_cpu_inc(irq_stat.sreset_irqs);
300 /* See if there are any machine-dependent calls */
301 if (ppc_md.system_reset_exception) {
302 if (ppc_md.system_reset_exception(regs))
306 die("System Reset", regs, SIGABRT);
309 #ifdef CONFIG_PPC_BOOK3S_64
310 BUG_ON(get_paca()->in_nmi == 0);
311 if (get_paca()->in_nmi > 1)
312 panic("Unrecoverable nested System Reset");
314 /* Must die if the interrupt is not recoverable */
315 if (!(regs->msr & MSR_RI))
316 panic("Unrecoverable System Reset");
321 /* What should we do here? We could issue a shutdown or hard reset. */
325 * I/O accesses can cause machine checks on powermacs.
326 * Check if the NIP corresponds to the address of a sync
327 * instruction for which there is an entry in the exception
329 * Note that the 601 only takes a machine check on TEA
330 * (transfer error ack) signal assertion, and does not
331 * set any of the top 16 bits of SRR1.
334 static inline int check_io_access(struct pt_regs *regs)
337 unsigned long msr = regs->msr;
338 const struct exception_table_entry *entry;
339 unsigned int *nip = (unsigned int *)regs->nip;
341 if (((msr & 0xffff0000) == 0 || (msr & (0x80000 | 0x40000)))
342 && (entry = search_exception_tables(regs->nip)) != NULL) {
344 * Check that it's a sync instruction, or somewhere
345 * in the twi; isync; nop sequence that inb/inw/inl uses.
346 * As the address is in the exception table
347 * we should be able to read the instr there.
348 * For the debug message, we look at the preceding
351 if (*nip == PPC_INST_NOP)
353 else if (*nip == PPC_INST_ISYNC)
355 if (*nip == PPC_INST_SYNC || (*nip >> 26) == OP_TRAP) {
359 rb = (*nip >> 11) & 0x1f;
360 printk(KERN_DEBUG "%s bad port %lx at %p\n",
361 (*nip & 0x100)? "OUT to": "IN from",
362 regs->gpr[rb] - _IO_BASE, nip);
364 regs->nip = extable_fixup(entry);
368 #endif /* CONFIG_PPC32 */
372 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
373 /* On 4xx, the reason for the machine check or program exception
   is in the ESR. */
375 #define get_reason(regs) ((regs)->dsisr)
376 #define REASON_FP ESR_FP
377 #define REASON_ILLEGAL (ESR_PIL | ESR_PUO)
378 #define REASON_PRIVILEGED ESR_PPR
379 #define REASON_TRAP ESR_PTR
381 /* single-step stuff */
382 #define single_stepping(regs) (current->thread.debug.dbcr0 & DBCR0_IC)
383 #define clear_single_step(regs) (current->thread.debug.dbcr0 &= ~DBCR0_IC)
386 /* On non-4xx, the reason for the machine check or program
387 exception is in the MSR. */
388 #define get_reason(regs) ((regs)->msr)
389 #define REASON_TM SRR1_PROGTM
390 #define REASON_FP SRR1_PROGFPE
391 #define REASON_ILLEGAL SRR1_PROGILL
392 #define REASON_PRIVILEGED SRR1_PROGPRIV
393 #define REASON_TRAP SRR1_PROGTRAP
395 #define single_stepping(regs) ((regs)->msr & MSR_SE)
396 #define clear_single_step(regs) ((regs)->msr &= ~MSR_SE)
399 #if defined(CONFIG_E500)
400 int machine_check_e500mc(struct pt_regs *regs)
402 unsigned long mcsr = mfspr(SPRN_MCSR);
403 unsigned long reason = mcsr;
406 if (reason & MCSR_LD) {
407 recoverable = fsl_rio_mcheck_exception(regs);
408 if (recoverable == 1)
412 printk("Machine check in kernel mode.\n");
413 printk("Caused by (from MCSR=%lx): ", reason);
415 if (reason & MCSR_MCP)
416 printk("Machine Check Signal\n");
418 if (reason & MCSR_ICPERR) {
419 printk("Instruction Cache Parity Error\n");
422 * This is recoverable by invalidating the i-cache.
424 mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
425 while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
429 * This will generally be accompanied by an instruction
430 * fetch error report -- only treat MCSR_IF as fatal
431 * if it wasn't due to an L1 parity error.
436 if (reason & MCSR_DCPERR_MC) {
437 printk("Data Cache Parity Error\n");
440 * In write shadow mode we auto-recover from the error, but it
441 * may still get logged and cause a machine check. We should
442 * only treat the non-write shadow case as non-recoverable.
444 if (!(mfspr(SPRN_L1CSR2) & L1CSR2_DCWS))
448 if (reason & MCSR_L2MMU_MHIT) {
449 printk("Hit on multiple TLB entries\n");
453 if (reason & MCSR_NMI)
454 printk("Non-maskable interrupt\n");
456 if (reason & MCSR_IF) {
457 printk("Instruction Fetch Error Report\n");
461 if (reason & MCSR_LD) {
462 printk("Load Error Report\n");
466 if (reason & MCSR_ST) {
467 printk("Store Error Report\n");
471 if (reason & MCSR_LDG) {
472 printk("Guarded Load Error Report\n");
476 if (reason & MCSR_TLBSYNC)
477 printk("Simultaneous tlbsync operations\n");
479 if (reason & MCSR_BSL2_ERR) {
480 printk("Level 2 Cache Error\n");
484 if (reason & MCSR_MAV) {
487 addr = mfspr(SPRN_MCAR);
488 addr |= (u64)mfspr(SPRN_MCARU) << 32;
490 printk("Machine Check %s Address: %#llx\n",
491 reason & MCSR_MEA ? "Effective" : "Physical", addr);
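/*
 * MCSR is write-one-to-clear: writing back the value we read clears the
 * causes reported above, and success is claimed only if nothing remains
 * set and every cause seen was recoverable.
 */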
495 mtspr(SPRN_MCSR, mcsr);
496 return mfspr(SPRN_MCSR) == 0 && recoverable;
499 int machine_check_e500(struct pt_regs *regs)
501 unsigned long reason = mfspr(SPRN_MCSR);
503 if (reason & MCSR_BUS_RBERR) {
504 if (fsl_rio_mcheck_exception(regs))
506 if (fsl_pci_mcheck_exception(regs))
510 printk("Machine check in kernel mode.\n");
511 printk("Caused by (from MCSR=%lx): ", reason);
513 if (reason & MCSR_MCP)
514 printk("Machine Check Signal\n");
515 if (reason & MCSR_ICPERR)
516 printk("Instruction Cache Parity Error\n");
517 if (reason & MCSR_DCP_PERR)
518 printk("Data Cache Push Parity Error\n");
519 if (reason & MCSR_DCPERR)
520 printk("Data Cache Parity Error\n");
521 if (reason & MCSR_BUS_IAERR)
522 printk("Bus - Instruction Address Error\n");
523 if (reason & MCSR_BUS_RAERR)
524 printk("Bus - Read Address Error\n");
525 if (reason & MCSR_BUS_WAERR)
526 printk("Bus - Write Address Error\n");
527 if (reason & MCSR_BUS_IBERR)
528 printk("Bus - Instruction Data Error\n");
529 if (reason & MCSR_BUS_RBERR)
530 printk("Bus - Read Data Bus Error\n");
531 if (reason & MCSR_BUS_WBERR)
532 printk("Bus - Write Data Bus Error\n");
533 if (reason & MCSR_BUS_IPERR)
534 printk("Bus - Instruction Parity Error\n");
535 if (reason & MCSR_BUS_RPERR)
536 printk("Bus - Read Parity Error\n");
541 int machine_check_generic(struct pt_regs *regs)
545 #elif defined(CONFIG_E200)
546 int machine_check_e200(struct pt_regs *regs)
548 unsigned long reason = mfspr(SPRN_MCSR);
550 printk("Machine check in kernel mode.\n");
551 printk("Caused by (from MCSR=%lx): ", reason);
553 if (reason & MCSR_MCP)
554 printk("Machine Check Signal\n");
555 if (reason & MCSR_CP_PERR)
556 printk("Cache Push Parity Error\n");
557 if (reason & MCSR_CPERR)
558 printk("Cache Parity Error\n");
559 if (reason & MCSR_EXCP_ERR)
560 printk("ISI, ITLB, or Bus Error on first instruction fetch for an exception handler\n");
561 if (reason & MCSR_BUS_IRERR)
562 printk("Bus - Read Bus Error on instruction fetch\n");
563 if (reason & MCSR_BUS_DRERR)
564 printk("Bus - Read Bus Error on data load\n");
565 if (reason & MCSR_BUS_WRERR)
566 printk("Bus - Write Bus Error on buffered store or cache line push\n");
570 #elif defined(CONFIG_PPC32)
571 int machine_check_generic(struct pt_regs *regs)
573 unsigned long reason = regs->msr;
575 printk("Machine check in kernel mode.\n");
576 printk("Caused by (from SRR1=%lx): ", reason);
577 switch (reason & 0x601F0000) {
579 printk("Machine check signal\n");
581 case 0: /* for 601 */
583 case 0x140000: /* 7450 MSS error and TEA */
584 printk("Transfer error ack signal\n");
587 printk("Data parity error signal\n");
590 printk("Address parity error signal\n");
593 printk("L1 Data Cache error\n");
596 printk("L1 Instruction Cache error\n");
599 printk("L2 data cache parity error\n");
602 printk("Unknown values in msr\n");
606 #endif /* everything else */
608 void machine_check_exception(struct pt_regs *regs)
610 enum ctx_state prev_state = exception_enter();
613 /* 64s accounts the mce in machine_check_early when in HVMODE */
614 if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64) || !cpu_has_feature(CPU_FTR_HVMODE))
615 __this_cpu_inc(irq_stat.mce_exceptions);
617 add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
619 /* See if there are any machine-dependent calls. In theory, we would want
620 * to call the CPU handler first, and call the ppc_md. one if the CPU
621 * one returns a positive number. However, there is existing code
622 * that assumes the board gets a first chance, so let's keep it
623 * that way for now and fix things later. --BenH.
625 if (ppc_md.machine_check_exception)
626 recover = ppc_md.machine_check_exception(regs);
627 else if (cur_cpu_spec->machine_check)
628 recover = cur_cpu_spec->machine_check(regs);
633 if (debugger_fault_handler(regs))
636 if (check_io_access(regs))
639 die("Machine check", regs, SIGBUS);
641 /* Must die if the interrupt is not recoverable */
642 if (!(regs->msr & MSR_RI))
643 panic("Unrecoverable Machine check");
646 exception_exit(prev_state);
649 void SMIException(struct pt_regs *regs)
651 die("System Management Interrupt", regs, SIGABRT);
654 void handle_hmi_exception(struct pt_regs *regs)
656 struct pt_regs *old_regs;
658 old_regs = set_irq_regs(regs);
661 if (ppc_md.handle_hmi_exception)
662 ppc_md.handle_hmi_exception(regs);
665 set_irq_regs(old_regs);
668 void unknown_exception(struct pt_regs *regs)
670 enum ctx_state prev_state = exception_enter();
672 printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
673 regs->nip, regs->msr, regs->trap);
675 _exception(SIGTRAP, regs, 0, 0);
677 exception_exit(prev_state);
680 void instruction_breakpoint_exception(struct pt_regs *regs)
682 enum ctx_state prev_state = exception_enter();
684 if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
685 5, SIGTRAP) == NOTIFY_STOP)
687 if (debugger_iabr_match(regs))
689 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
692 exception_exit(prev_state);
695 void RunModeException(struct pt_regs *regs)
697 _exception(SIGTRAP, regs, 0, 0);
700 void single_step_exception(struct pt_regs *regs)
702 enum ctx_state prev_state = exception_enter();
704 clear_single_step(regs);
706 if (kprobe_post_handler(regs))
709 if (notify_die(DIE_SSTEP, "single_step", regs, 5,
710 5, SIGTRAP) == NOTIFY_STOP)
712 if (debugger_sstep(regs))
715 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
718 exception_exit(prev_state);
720 NOKPROBE_SYMBOL(single_step_exception);
723 * After we have successfully emulated an instruction, we have to
724 * check if the instruction was being single-stepped, and if so,
725 * pretend we got a single-step exception. This was pointed out
726 * by Kumar Gala. -- paulus
728 static void emulate_single_step(struct pt_regs *regs)
730 if (single_stepping(regs))
731 single_step_exception(regs);
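/*
 * Translate FPSCR exception status into a SIGFPE si_code: a cause is
 * reported only if it is both enabled (the xE bit) and currently
 * flagged (the corresponding xX sticky bit).
 */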
734 static inline int __parse_fpscr(unsigned long fpscr)
738 /* Invalid operation */
739 if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
743 else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
747 else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
751 else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
755 else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
761 static void parse_fpe(struct pt_regs *regs)
765 flush_fp_to_thread(current);
767 code = __parse_fpscr(current->thread.fp_state.fpscr);
769 _exception(SIGFPE, regs, code, regs->nip);
773 * Illegal instruction emulation support. Originally written to
774 * provide the PVR to user applications using the mfspr rd, PVR.
775 * Return non-zero if we can't emulate, or -EFAULT if the associated
776 * memory access caused an access fault. Return zero on success.
778 * There are a couple of ways to do this, either "decode" the instruction
779 * or directly match lots of bits. In this case, matching lots of
780 * bits is faster and easier.
783 static int emulate_string_inst(struct pt_regs *regs, u32 instword)
785 u8 rT = (instword >> 21) & 0x1f;
786 u8 rA = (instword >> 16) & 0x1f;
787 u8 NB_RB = (instword >> 11) & 0x1f;
792 /* Early out if we are an invalid form of lswx */
793 if ((instword & PPC_INST_STRING_MASK) == PPC_INST_LSWX)
794 if ((rT == rA) || (rT == NB_RB))
797 EA = (rA == 0) ? 0 : regs->gpr[rA];
799 switch (instword & PPC_INST_STRING_MASK) {
803 num_bytes = regs->xer & 0x7f;
807 num_bytes = (NB_RB == 0) ? 32 : NB_RB;
813 while (num_bytes != 0)
816 u32 shift = 8 * (3 - (pos & 0x3));
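/* String instructions pack bytes into the register most-significant byte first */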
818 /* if process is 32-bit, clear upper 32 bits of EA */
819 if ((regs->msr & MSR_64BIT) == 0)
822 switch ((instword & PPC_INST_STRING_MASK)) {
825 if (get_user(val, (u8 __user *)EA))
827 /* first time updating this reg,
831 regs->gpr[rT] |= val << shift;
835 val = regs->gpr[rT] >> shift;
836 if (put_user(val, (u8 __user *)EA))
840 /* move EA to next address */
844 /* manage our position within the register */
855 static int emulate_popcntb_inst(struct pt_regs *regs, u32 instword)
860 ra = (instword >> 16) & 0x1f;
861 rs = (instword >> 21) & 0x1f;
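/*
 * Per-byte population count via the usual SWAR bit-twiddling: fold
 * adjacent bit pairs, then nibble sums, for all eight bytes in parallel.
 */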
864 tmp = tmp - ((tmp >> 1) & 0x5555555555555555ULL);
865 tmp = (tmp & 0x3333333333333333ULL) + ((tmp >> 2) & 0x3333333333333333ULL);
866 tmp = (tmp + (tmp >> 4)) & 0x0f0f0f0f0f0f0f0fULL;
872 static int emulate_isel(struct pt_regs *regs, u32 instword)
874 u8 rT = (instword >> 21) & 0x1f;
875 u8 rA = (instword >> 16) & 0x1f;
876 u8 rB = (instword >> 11) & 0x1f;
877 u8 BC = (instword >> 6) & 0x1f;
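/* isel: rT = CR[BC] ? (rA ? GPR[rA] : 0) : GPR[rB] */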
881 tmp = (rA == 0) ? 0 : regs->gpr[rA];
882 bit = (regs->ccr >> (31 - BC)) & 0x1;
884 regs->gpr[rT] = bit ? tmp : regs->gpr[rB];
889 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
890 static inline bool tm_abort_check(struct pt_regs *regs, int cause)
892 /* If we're emulating a load/store in an active transaction, we cannot
893 * emulate it as the kernel operates in transaction suspended context.
894 * We need to abort the transaction. This creates a persistent TM
895 * abort so tell the user what caused it with a new code.
897 if (MSR_TM_TRANSACTIONAL(regs->msr)) {
905 static inline bool tm_abort_check(struct pt_regs *regs, int reason)
911 static int emulate_instruction(struct pt_regs *regs)
916 if (!user_mode(regs))
918 CHECK_FULL_REGS(regs);
920 if (get_user(instword, (u32 __user *)(regs->nip)))
923 /* Emulate the mfspr rD, PVR. */
924 if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
925 PPC_WARN_EMULATED(mfpvr, regs);
926 rd = (instword >> 21) & 0x1f;
927 regs->gpr[rd] = mfspr(SPRN_PVR);
931 /* Emulating the dcba insn is just a no-op. */
932 if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
933 PPC_WARN_EMULATED(dcba, regs);
937 /* Emulate the mcrxr insn. */
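/* mcrxr copies XER[SO,OV,CA] into the CR field selected by crfD and clears them in XER. */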
938 if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
939 int shift = (instword >> 21) & 0x1c;
940 unsigned long msk = 0xf0000000UL >> shift;
942 PPC_WARN_EMULATED(mcrxr, regs);
943 regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
944 regs->xer &= ~0xf0000000UL;
948 /* Emulate load/store string insn. */
949 if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
950 if (tm_abort_check(regs,
951 TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
953 PPC_WARN_EMULATED(string, regs);
954 return emulate_string_inst(regs, instword);
957 /* Emulate the popcntb (Population Count Bytes) instruction. */
958 if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
959 PPC_WARN_EMULATED(popcntb, regs);
960 return emulate_popcntb_inst(regs, instword);
963 /* Emulate isel (Integer Select) instruction */
964 if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
965 PPC_WARN_EMULATED(isel, regs);
966 return emulate_isel(regs, instword);
969 /* Emulate sync instruction variants */
970 if ((instword & PPC_INST_SYNC_MASK) == PPC_INST_SYNC) {
971 PPC_WARN_EMULATED(sync, regs);
972 asm volatile("sync");
977 /* Emulate the mfspr rD, DSCR. */
978 if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
979 PPC_INST_MFSPR_DSCR_USER) ||
980 ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
981 PPC_INST_MFSPR_DSCR)) &&
982 cpu_has_feature(CPU_FTR_DSCR)) {
983 PPC_WARN_EMULATED(mfdscr, regs);
984 rd = (instword >> 21) & 0x1f;
985 regs->gpr[rd] = mfspr(SPRN_DSCR);
988 /* Emulate the mtspr DSCR, rD. */
989 if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
990 PPC_INST_MTSPR_DSCR_USER) ||
991 ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
992 PPC_INST_MTSPR_DSCR)) &&
993 cpu_has_feature(CPU_FTR_DSCR)) {
994 PPC_WARN_EMULATED(mtdscr, regs);
995 rd = (instword >> 21) & 0x1f;
996 current->thread.dscr = regs->gpr[rd];
997 current->thread.dscr_inherit = 1;
998 mtspr(SPRN_DSCR, current->thread.dscr);
1006 int is_valid_bugaddr(unsigned long addr)
1008 return is_kernel_addr(addr);
1011 #ifdef CONFIG_MATH_EMULATION
1012 static int emulate_math(struct pt_regs *regs)
1015 extern int do_mathemu(struct pt_regs *regs);
1017 ret = do_mathemu(regs);
1019 PPC_WARN_EMULATED(math, regs);
1023 emulate_single_step(regs);
1027 code = __parse_fpscr(current->thread.fp_state.fpscr);
1028 _exception(SIGFPE, regs, code, regs->nip);
1032 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
1039 static inline int emulate_math(struct pt_regs *regs) { return -1; }
1042 void program_check_exception(struct pt_regs *regs)
1044 enum ctx_state prev_state = exception_enter();
1045 unsigned int reason = get_reason(regs);
1047 /* We can now get here via an FP Unavailable exception if the core
1048 * has no FPU; in that case the reason flags will be 0. */
1050 if (reason & REASON_FP) {
1051 /* IEEE FP exception */
1055 if (reason & REASON_TRAP) {
1056 unsigned long bugaddr;
1057 /* Debugger is first in line to stop recursive faults in
1058 * rcu_lock, notify_die, or atomic_notifier_call_chain */
1059 if (debugger_bpt(regs))
1062 if (kprobe_handler(regs))
1065 /* trap exception */
1066 if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
1070 bugaddr = regs->nip;
1072 * Fix up bugaddr for BUG_ON() in real mode
1074 if (!is_kernel_addr(bugaddr) && !(regs->msr & MSR_IR))
1075 bugaddr += PAGE_OFFSET;
1077 if (!(regs->msr & MSR_PR) && /* not user-mode */
1078 report_bug(bugaddr, regs) == BUG_TRAP_TYPE_WARN) {
1082 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
1085 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1086 if (reason & REASON_TM) {
1087 /* This is a TM "Bad Thing Exception" program check.
1089 * - An rfid/hrfid/mtmsrd attempts to cause an illegal
1090 * transition in TM states.
1091 * - A trechkpt is attempted when transactional.
1092 * - A treclaim is attempted when non transactional.
1093 * - A tend is illegally attempted.
1094 * - writing a TM SPR when transactional.
1096 if (!user_mode(regs) &&
1097 report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
1101 /* If usermode caused this, it's done something illegal and
1102 * gets a SIGILL slap on the wrist. We call it an illegal
1103 * operand to distinguish from the instruction just being bad
1104 * (e.g. executing a 'tend' on a CPU without TM!); it's an
1105 * illegal /placement/ of a valid instruction.
1107 if (user_mode(regs)) {
1108 _exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
1111 printk(KERN_EMERG "Unexpected TM Bad Thing exception "
1112 "at %lx (msr 0x%x)\n", regs->nip, reason);
1113 die("Unrecoverable exception", regs, SIGABRT);
1119 * If we took the program check in the kernel, skip down to sending a
1120 * SIGILL. The subsequent cases all relate to emulating instructions
1121 * which we should only do for userspace. We also do not want to enable
1122 * interrupts for kernel faults because that might lead to further
1123 * faults, and lose the context of the original exception.
1125 if (!user_mode(regs))
1128 /* We restore the interrupt state now */
1129 if (!arch_irq_disabled_regs(regs))
1132 /* (reason & REASON_ILLEGAL) would be the obvious thing here,
1133 * but there seems to be a hardware bug on the 405GP (RevD)
1134 * that means ESR is sometimes set incorrectly - either to
1135 * ESR_DST (!?) or 0. In the process of chasing this with the
1136 * hardware people - not sure if it can happen on any illegal
1137 * instruction or only on FP instructions, whether there is a
1138 * pattern to occurrences etc. -dgibson 31/Mar/2003
1140 if (!emulate_math(regs))
1143 /* Try to emulate it if we should. */
1144 if (reason & (REASON_ILLEGAL | REASON_PRIVILEGED)) {
1145 switch (emulate_instruction(regs)) {
1148 emulate_single_step(regs);
1151 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
1157 if (reason & REASON_PRIVILEGED)
1158 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
1160 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1163 exception_exit(prev_state);
1165 NOKPROBE_SYMBOL(program_check_exception);
1168 * This occurs when running in hypervisor mode on POWER6 or later
1169 * and an illegal instruction is encountered.
1171 void emulation_assist_interrupt(struct pt_regs *regs)
1173 regs->msr |= REASON_ILLEGAL;
1174 program_check_exception(regs);
1176 NOKPROBE_SYMBOL(emulation_assist_interrupt);
1178 void alignment_exception(struct pt_regs *regs)
1180 enum ctx_state prev_state = exception_enter();
1181 int sig, code, fixed = 0;
1183 /* We restore the interrupt state now */
1184 if (!arch_irq_disabled_regs(regs))
1187 if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT))
1190 /* we don't implement logging of alignment exceptions */
1191 if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
1192 fixed = fix_alignment(regs);
1195 regs->nip += 4; /* skip over emulated instruction */
1196 emulate_single_step(regs);
1200 /* Operand address was bad */
1201 if (fixed == -EFAULT) {
1208 if (user_mode(regs))
1209 _exception(sig, regs, code, regs->dar);
1211 bad_page_fault(regs, regs->dar, sig);
1214 exception_exit(prev_state);
1217 void slb_miss_bad_addr(struct pt_regs *regs)
1219 enum ctx_state prev_state = exception_enter();
1221 if (user_mode(regs))
1222 _exception(SIGSEGV, regs, SEGV_BNDERR, regs->dar);
1224 bad_page_fault(regs, regs->dar, SIGSEGV);
1226 exception_exit(prev_state);
1229 void StackOverflow(struct pt_regs *regs)
1231 printk(KERN_CRIT "Kernel stack overflow in process %p, r1=%lx\n",
1232 current, regs->gpr[1]);
1235 panic("kernel stack overflow");
1238 void nonrecoverable_exception(struct pt_regs *regs)
1240 printk(KERN_ERR "Non-recoverable exception at PC=%lx MSR=%lx\n",
1241 regs->nip, regs->msr);
1243 die("nonrecoverable exception", regs, SIGKILL);
1246 void kernel_fp_unavailable_exception(struct pt_regs *regs)
1248 enum ctx_state prev_state = exception_enter();
1250 printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
1251 "%lx at %lx\n", regs->trap, regs->nip);
1252 die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
1254 exception_exit(prev_state);
1257 void altivec_unavailable_exception(struct pt_regs *regs)
1259 enum ctx_state prev_state = exception_enter();
1261 if (user_mode(regs)) {
1262 /* A user program has executed an altivec instruction,
1263 but this kernel doesn't support altivec. */
1264 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1268 printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
1269 "%lx at %lx\n", regs->trap, regs->nip);
1270 die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
1273 exception_exit(prev_state);
1276 void vsx_unavailable_exception(struct pt_regs *regs)
1278 if (user_mode(regs)) {
1279 /* A user program has executed a vsx instruction,
1280 but this kernel doesn't support vsx. */
1281 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1285 printk(KERN_EMERG "Unrecoverable VSX Unavailable Exception "
1286 "%lx at %lx\n", regs->trap, regs->nip);
1287 die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
1291 static void tm_unavailable(struct pt_regs *regs)
1293 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1294 if (user_mode(regs)) {
1295 current->thread.load_tm++;
1296 regs->msr |= MSR_TM;
1298 tm_restore_sprs(&current->thread);
1302 pr_emerg("Unrecoverable TM Unavailable Exception "
1303 "%lx at %lx\n", regs->trap, regs->nip);
1304 die("Unrecoverable TM Unavailable Exception", regs, SIGABRT);
1307 void facility_unavailable_exception(struct pt_regs *regs)
1309 static char *facility_strings[] = {
1310 [FSCR_FP_LG] = "FPU",
1311 [FSCR_VECVSX_LG] = "VMX/VSX",
1312 [FSCR_DSCR_LG] = "DSCR",
1313 [FSCR_PM_LG] = "PMU SPRs",
1314 [FSCR_BHRB_LG] = "BHRB",
1315 [FSCR_TM_LG] = "TM",
1316 [FSCR_EBB_LG] = "EBB",
1317 [FSCR_TAR_LG] = "TAR",
1318 [FSCR_MSGP_LG] = "MSGP",
1319 [FSCR_SCV_LG] = "SCV",
1321 char *facility = "unknown";
1327 hv = (regs->trap == 0xf80);
1329 value = mfspr(SPRN_HFSCR);
1331 value = mfspr(SPRN_FSCR);
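/* The interrupt cause (the failing facility) is in the top byte of the (H)FSCR value */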
1333 status = value >> 56;
1334 if (status == FSCR_DSCR_LG) {
1336 * User is accessing the DSCR register using the problem
1337 * state only SPR number (0x03) either through a mfspr or
1338 * a mtspr instruction. If it is a write attempt through
1339 * a mtspr, then we set the inherit bit. This also allows
1340 * the user to write or read the register directly in the
1341 * future by setting via the FSCR DSCR bit. But in case it
1342 * is a read DSCR attempt through a mfspr instruction, we
1343 * just emulate the instruction instead. This code path will
1344 * always emulate all the mfspr instructions till the user
1345 * has attempted at least one mtspr instruction. This way it
1346 * preserves the same behaviour when the user is accessing
1347 * the DSCR through privilege level only SPR number (0x11)
1348 * which is emulated through illegal instruction exception.
1349 * We always leave HFSCR DSCR set.
1351 if (get_user(instword, (u32 __user *)(regs->nip))) {
1352 pr_err("Failed to fetch the user instruction\n");
1356 /* Write into DSCR (mtspr 0x03, RS) */
1357 if ((instword & PPC_INST_MTSPR_DSCR_USER_MASK)
1358 == PPC_INST_MTSPR_DSCR_USER) {
1359 rd = (instword >> 21) & 0x1f;
1360 current->thread.dscr = regs->gpr[rd];
1361 current->thread.dscr_inherit = 1;
1362 current->thread.fscr |= FSCR_DSCR;
1363 mtspr(SPRN_FSCR, current->thread.fscr);
1366 /* Read from DSCR (mfspr RT, 0x03) */
1367 if ((instword & PPC_INST_MFSPR_DSCR_USER_MASK)
1368 == PPC_INST_MFSPR_DSCR_USER) {
1369 if (emulate_instruction(regs)) {
1370 pr_err("DSCR based mfspr emulation failed\n");
1374 emulate_single_step(regs);
1379 if (status == FSCR_TM_LG) {
1381 * If we're here then the hardware is TM aware because it
1382 * generated an exception with FSCR_TM set.
1384 * If cpu_has_feature(CPU_FTR_TM) is false, then either firmware
1385 * told us not to do TM, or the kernel is not built with TM
1388 * If both of those things are true, then userspace can spam the
1389 * console by triggering the printk() below just by continually
1390 * doing tbegin (or any TM instruction). So in that case just
1391 * send the process a SIGILL immediately.
1393 if (!cpu_has_feature(CPU_FTR_TM))
1396 tm_unavailable(regs);
1400 if ((hv || status >= 2) &&
1401 (status < ARRAY_SIZE(facility_strings)) &&
1402 facility_strings[status])
1403 facility = facility_strings[status];
1405 /* We restore the interrupt state now */
1406 if (!arch_irq_disabled_regs(regs))
1409 pr_err_ratelimited("%sFacility '%s' unavailable (%d), exception at 0x%lx, MSR=%lx\n",
1410 hv ? "Hypervisor " : "", facility, status, regs->nip, regs->msr);
1413 if (user_mode(regs)) {
1414 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1418 die("Unexpected facility unavailable exception", regs, SIGABRT);
1422 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1424 void fp_unavailable_tm(struct pt_regs *regs)
1426 /* Note: This does not handle any kind of FP laziness. */
1428 TM_DEBUG("FP Unavailable trap whilst transactional at 0x%lx, MSR=%lx\n",
1429 regs->nip, regs->msr);
1431 /* We can only have got here if the task started using FP after
1432 * beginning the transaction. So, the transactional regs are just a
1433 * copy of the checkpointed ones. But, we still need to recheckpoint
1434 * as we're enabling FP for the process; it will return, abort the
1435 * transaction, and probably retry but now with FP enabled. So the
1436 * checkpointed FP registers need to be loaded.
1438 tm_reclaim_current(TM_CAUSE_FAC_UNAV);
1439 /* Reclaim didn't save out any FPRs to transact_fprs. */
1441 /* Enable FP for the task: */
1442 regs->msr |= (MSR_FP | current->thread.fpexc_mode);
1444 /* This loads and recheckpoints the FP registers from
1445 * thread.fpr[]. They will remain in registers after the
1446 * checkpoint so we don't need to reload them after.
1447 * If VMX is in use, the VRs now hold checkpointed values,
1448 * so we don't want to load the VRs from the thread_struct.
1450 tm_recheckpoint(&current->thread, MSR_FP);
1452 /* If VMX is in use, get the transactional values back */
1453 if (regs->msr & MSR_VEC) {
1454 msr_check_and_set(MSR_VEC);
1455 load_vr_state(&current->thread.vr_state);
1456 /* At this point all the VSX state is loaded, so enable it */
1457 regs->msr |= MSR_VSX;
1461 void altivec_unavailable_tm(struct pt_regs *regs)
1463 /* See the comments in fp_unavailable_tm(). This function operates
1467 TM_DEBUG("Vector Unavailable trap whilst transactional at 0x%lx,"
1469 regs->nip, regs->msr);
1470 tm_reclaim_current(TM_CAUSE_FAC_UNAV);
1471 regs->msr |= MSR_VEC;
1472 tm_recheckpoint(&current->thread, MSR_VEC);
1473 current->thread.used_vr = 1;
1475 if (regs->msr & MSR_FP) {
1476 msr_check_and_set(MSR_FP);
1477 load_fp_state(&current->thread.fp_state);
1478 regs->msr |= MSR_VSX;
1482 void vsx_unavailable_tm(struct pt_regs *regs)
1484 unsigned long orig_msr = regs->msr;
1486 /* See the comments in fp_unavailable_tm(). This works similarly,
1487 * though we're loading both FP and VEC registers in here.
1489 * If FP isn't in use, load FP regs. If VEC isn't in use, load VEC
1490 * regs. Either way, set MSR_VSX.
1493 TM_DEBUG("VSX Unavailable trap whilst transactional at 0x%lx,"
1495 regs->nip, regs->msr);
1497 current->thread.used_vsr = 1;
1499 /* If FP and VMX are already loaded, we have all the state we need */
1500 if ((orig_msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC)) {
1501 regs->msr |= MSR_VSX;
1505 /* This reclaims FP and/or VR regs if they're already enabled */
1506 tm_reclaim_current(TM_CAUSE_FAC_UNAV);
1508 regs->msr |= MSR_VEC | MSR_FP | current->thread.fpexc_mode |
1511 /* This loads & recheckpoints FP and VRs; but we have
1512 * to be sure not to overwrite previously-valid state.
1514 tm_recheckpoint(&current->thread, regs->msr & ~orig_msr);
1516 msr_check_and_set(orig_msr & (MSR_FP | MSR_VEC));
1518 if (orig_msr & MSR_FP)
1519 load_fp_state(&current->thread.fp_state);
1520 if (orig_msr & MSR_VEC)
1521 load_vr_state(&current->thread.vr_state);
1523 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1525 void performance_monitor_exception(struct pt_regs *regs)
1527 __this_cpu_inc(irq_stat.pmu_irqs);
1532 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1533 static void handle_debug(struct pt_regs *regs, unsigned long debug_status)
1537 * Determine the cause of the debug event, clear the
1538 * event flags and send a trap to the handler. Torez
1540 if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
1541 dbcr_dac(current) &= ~(DBCR_DAC1R | DBCR_DAC1W);
1542 #ifdef CONFIG_PPC_ADV_DEBUG_DAC_RANGE
1543 current->thread.debug.dbcr2 &= ~DBCR2_DAC12MODE;
1545 do_send_trap(regs, mfspr(SPRN_DAC1), debug_status, TRAP_HWBKPT,
1548 } else if (debug_status & (DBSR_DAC2R | DBSR_DAC2W)) {
1549 dbcr_dac(current) &= ~(DBCR_DAC2R | DBCR_DAC2W);
1550 do_send_trap(regs, mfspr(SPRN_DAC2), debug_status, TRAP_HWBKPT,
1553 } else if (debug_status & DBSR_IAC1) {
1554 current->thread.debug.dbcr0 &= ~DBCR0_IAC1;
1555 dbcr_iac_range(current) &= ~DBCR_IAC12MODE;
1556 do_send_trap(regs, mfspr(SPRN_IAC1), debug_status, TRAP_HWBKPT,
1559 } else if (debug_status & DBSR_IAC2) {
1560 current->thread.debug.dbcr0 &= ~DBCR0_IAC2;
1561 do_send_trap(regs, mfspr(SPRN_IAC2), debug_status, TRAP_HWBKPT,
1564 } else if (debug_status & DBSR_IAC3) {
1565 current->thread.debug.dbcr0 &= ~DBCR0_IAC3;
1566 dbcr_iac_range(current) &= ~DBCR_IAC34MODE;
1567 do_send_trap(regs, mfspr(SPRN_IAC3), debug_status, TRAP_HWBKPT,
1570 } else if (debug_status & DBSR_IAC4) {
1571 current->thread.debug.dbcr0 &= ~DBCR0_IAC4;
1572 do_send_trap(regs, mfspr(SPRN_IAC4), debug_status, TRAP_HWBKPT,
1577 * At the point this routine was called, the MSR(DE) was turned off.
1578 * Check all other debug flags and see if that bit needs to be turned
1581 if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
1582 current->thread.debug.dbcr1))
1583 regs->msr |= MSR_DE;
1585 /* Make sure the IDM flag is off */
1586 current->thread.debug.dbcr0 &= ~DBCR0_IDM;
1589 mtspr(SPRN_DBCR0, current->thread.debug.dbcr0);
1592 void DebugException(struct pt_regs *regs, unsigned long debug_status)
1594 current->thread.debug.dbsr = debug_status;
1596 /* Hack alert: On BookE, Branch Taken stops on the branch itself, while
1597 * on server, it stops on the target of the branch. In order to simulate
1598 * the server behaviour, we thus restart right away with a single step
1599 * instead of stopping here when hitting a BT
1601 if (debug_status & DBSR_BT) {
1602 regs->msr &= ~MSR_DE;
1605 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
1606 /* Clear the BT event */
1607 mtspr(SPRN_DBSR, DBSR_BT);
1609 /* Do the single step trick only when coming from userspace */
1610 if (user_mode(regs)) {
1611 current->thread.debug.dbcr0 &= ~DBCR0_BT;
1612 current->thread.debug.dbcr0 |= DBCR0_IDM | DBCR0_IC;
1613 regs->msr |= MSR_DE;
1617 if (kprobe_post_handler(regs))
1620 if (notify_die(DIE_SSTEP, "block_step", regs, 5,
1621 5, SIGTRAP) == NOTIFY_STOP) {
1624 if (debugger_sstep(regs))
1626 } else if (debug_status & DBSR_IC) { /* Instruction complete */
1627 regs->msr &= ~MSR_DE;
1629 /* Disable instruction completion */
1630 mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_IC);
1631 /* Clear the instruction completion event */
1632 mtspr(SPRN_DBSR, DBSR_IC);
1634 if (kprobe_post_handler(regs))
1637 if (notify_die(DIE_SSTEP, "single_step", regs, 5,
1638 5, SIGTRAP) == NOTIFY_STOP) {
1642 if (debugger_sstep(regs))
1645 if (user_mode(regs)) {
1646 current->thread.debug.dbcr0 &= ~DBCR0_IC;
1647 if (DBCR_ACTIVE_EVENTS(current->thread.debug.dbcr0,
1648 current->thread.debug.dbcr1))
1649 regs->msr |= MSR_DE;
1651 /* Make sure the IDM bit is off */
1652 current->thread.debug.dbcr0 &= ~DBCR0_IDM;
1655 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
1657 handle_debug(regs, debug_status);
1659 NOKPROBE_SYMBOL(DebugException);
1660 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
1662 #if !defined(CONFIG_TAU_INT)
1663 void TAUException(struct pt_regs *regs)
1665 printk("TAU trap at PC: %lx, MSR: %lx, vector=%lx %s\n",
1666 regs->nip, regs->msr, regs->trap, print_tainted());
1668 #endif /* CONFIG_TAU_INT */
1670 #ifdef CONFIG_ALTIVEC
1671 void altivec_assist_exception(struct pt_regs *regs)
1675 if (!user_mode(regs)) {
1676 printk(KERN_EMERG "VMX/Altivec assist exception in kernel mode"
1677 " at %lx\n", regs->nip);
1678 die("Kernel VMX/Altivec assist exception", regs, SIGILL);
1681 flush_altivec_to_thread(current);
1683 PPC_WARN_EMULATED(altivec, regs);
1684 err = emulate_altivec(regs);
1686 regs->nip += 4; /* skip emulated instruction */
1687 emulate_single_step(regs);
1691 if (err == -EFAULT) {
1692 /* got an error reading the instruction */
1693 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
1695 /* didn't recognize the instruction */
1696 /* XXX quick hack for now: set the non-Java bit in the VSCR */
1697 printk_ratelimited(KERN_ERR "Unrecognized altivec instruction "
1698 "in %s at %lx\n", current->comm, regs->nip);
1699 current->thread.vr_state.vscr.u[3] |= 0x10000;
1702 #endif /* CONFIG_ALTIVEC */
1704 #ifdef CONFIG_FSL_BOOKE
1705 void CacheLockingException(struct pt_regs *regs, unsigned long address,
1706 unsigned long error_code)
1708 /* We treat cache locking instructions from the user
1709 * as priv ops; in the future we could try to do
1712 if (error_code & (ESR_DLK|ESR_ILK))
1713 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
1716 #endif /* CONFIG_FSL_BOOKE */
1719 void SPEFloatingPointException(struct pt_regs *regs)
1721 extern int do_spe_mathemu(struct pt_regs *regs);
1722 unsigned long spefscr;
1727 flush_spe_to_thread(current);
1729 spefscr = current->thread.spefscr;
1730 fpexc_mode = current->thread.fpexc_mode;
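/* Map the SPEFSCR status bits the task has asked to trap on to a SIGFPE si_code */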
1732 if ((spefscr & SPEFSCR_FOVF) && (fpexc_mode & PR_FP_EXC_OVF)) {
1735 else if ((spefscr & SPEFSCR_FUNF) && (fpexc_mode & PR_FP_EXC_UND)) {
1738 else if ((spefscr & SPEFSCR_FDBZ) && (fpexc_mode & PR_FP_EXC_DIV))
1740 else if ((spefscr & SPEFSCR_FINV) && (fpexc_mode & PR_FP_EXC_INV)) {
1743 else if ((spefscr & (SPEFSCR_FG | SPEFSCR_FX)) && (fpexc_mode & PR_FP_EXC_RES))
1746 err = do_spe_mathemu(regs);
1748 regs->nip += 4; /* skip emulated instruction */
1749 emulate_single_step(regs);
1753 if (err == -EFAULT) {
1754 /* got an error reading the instruction */
1755 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
1756 } else if (err == -EINVAL) {
1757 /* didn't recognize the instruction */
1758 printk(KERN_ERR "unrecognized spe instruction "
1759 "in %s at %lx\n", current->comm, regs->nip);
1761 _exception(SIGFPE, regs, code, regs->nip);
1767 void SPEFloatingPointRoundException(struct pt_regs *regs)
1769 extern int speround_handler(struct pt_regs *regs);
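/* Save the task's live SPE state so the rounding fixup can inspect it */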
1773 if (regs->msr & MSR_SPE)
1774 giveup_spe(current);
1778 err = speround_handler(regs);
1780 regs->nip += 4; /* skip emulated instruction */
1781 emulate_single_step(regs);
1785 if (err == -EFAULT) {
1786 /* got an error reading the instruction */
1787 _exception(SIGSEGV, regs, SEGV_ACCERR, regs->nip);
1788 } else if (err == -EINVAL) {
1789 /* didn't recognize the instruction */
1790 printk(KERN_ERR "unrecognized spe instruction "
1791 "in %s at %lx\n", current->comm, regs->nip);
1793 _exception(SIGFPE, regs, 0, regs->nip);
1800 * We enter here if we get an unrecoverable exception, that is, one
1801 * that happened at a point where the RI (recoverable interrupt) bit
1802 * in the MSR is 0. This indicates that SRR0/1 are live, and that
1803 * we therefore lost state by taking this exception.
1805 void unrecoverable_exception(struct pt_regs *regs)
1807 printk(KERN_EMERG "Unrecoverable exception %lx at %lx\n",
1808 regs->trap, regs->nip);
1809 die("Unrecoverable exception", regs, SIGABRT);
1811 NOKPROBE_SYMBOL(unrecoverable_exception);
1813 #if defined(CONFIG_BOOKE_WDT) || defined(CONFIG_40x)
1815 * Default handler for a Watchdog exception,
1816 * spins until a reboot occurs
1818 void __attribute__ ((weak)) WatchdogHandler(struct pt_regs *regs)
1820 /* Generic WatchdogHandler, implement your own */
1821 mtspr(SPRN_TCR, mfspr(SPRN_TCR)&(~TCR_WIE));
1825 void WatchdogException(struct pt_regs *regs)
1827 printk (KERN_EMERG "PowerPC Book-E Watchdog Exception\n");
1828 WatchdogHandler(regs);
1833 * We enter here if we discover during exception entry that we are
1834 * running in supervisor mode with a userspace value in the stack pointer.
1836 void kernel_bad_stack(struct pt_regs *regs)
1838 printk(KERN_EMERG "Bad kernel stack pointer %lx at %lx\n",
1839 regs->gpr[1], regs->nip);
1840 die("Bad kernel stack pointer", regs, SIGABRT);
1842 NOKPROBE_SYMBOL(kernel_bad_stack);
1844 void __init trap_init(void)
1849 #ifdef CONFIG_PPC_EMULATED_STATS
1851 #define WARN_EMULATED_SETUP(type) .type = { .name = #type }
1853 struct ppc_emulated ppc_emulated = {
1854 #ifdef CONFIG_ALTIVEC
1855 WARN_EMULATED_SETUP(altivec),
1857 WARN_EMULATED_SETUP(dcba),
1858 WARN_EMULATED_SETUP(dcbz),
1859 WARN_EMULATED_SETUP(fp_pair),
1860 WARN_EMULATED_SETUP(isel),
1861 WARN_EMULATED_SETUP(mcrxr),
1862 WARN_EMULATED_SETUP(mfpvr),
1863 WARN_EMULATED_SETUP(multiple),
1864 WARN_EMULATED_SETUP(popcntb),
1865 WARN_EMULATED_SETUP(spe),
1866 WARN_EMULATED_SETUP(string),
1867 WARN_EMULATED_SETUP(sync),
1868 WARN_EMULATED_SETUP(unaligned),
1869 #ifdef CONFIG_MATH_EMULATION
1870 WARN_EMULATED_SETUP(math),
1873 WARN_EMULATED_SETUP(vsx),
1876 WARN_EMULATED_SETUP(mfdscr),
1877 WARN_EMULATED_SETUP(mtdscr),
1878 WARN_EMULATED_SETUP(lq_stq),
1882 u32 ppc_warn_emulated;
1884 void ppc_warn_emulated_print(const char *type)
1886 pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm,
1890 static int __init ppc_warn_emulated_init(void)
1892 struct dentry *dir, *d;
1894 struct ppc_emulated_entry *entries = (void *)&ppc_emulated;
1896 if (!powerpc_debugfs_root)
1899 dir = debugfs_create_dir("emulated_instructions",
1900 powerpc_debugfs_root);
1904 d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir,
1905 &ppc_warn_emulated);
1909 for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
1910 d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir,
1911 (u32 *)&entries[i].val.counter);
1919 debugfs_remove_recursive(dir);
1923 device_initcall(ppc_warn_emulated_init);
1925 #endif /* CONFIG_PPC_EMULATED_STATS */