2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
7 * Copyright (C) 1995, 1996 Paul M. Antoine
8 * Copyright (C) 1998 Ulf Carlsson
9 * Copyright (C) 1999 Silicon Graphics, Inc.
10 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
11 * Copyright (C) 2000, 01 MIPS Technologies, Inc.
12 * Copyright (C) 2002, 2003, 2004, 2005 Maciej W. Rozycki
14 #include <linux/init.h>
16 #include <linux/module.h>
17 #include <linux/sched.h>
18 #include <linux/smp.h>
19 #include <linux/smp_lock.h>
20 #include <linux/spinlock.h>
21 #include <linux/kallsyms.h>
22 #include <linux/bootmem.h>
23 #include <linux/interrupt.h>
25 #include <asm/bootinfo.h>
26 #include <asm/branch.h>
27 #include <asm/break.h>
31 #include <asm/mipsregs.h>
32 #include <asm/mipsmtregs.h>
33 #include <asm/module.h>
34 #include <asm/pgtable.h>
35 #include <asm/ptrace.h>
36 #include <asm/sections.h>
37 #include <asm/system.h>
38 #include <asm/tlbdebug.h>
39 #include <asm/traps.h>
40 #include <asm/uaccess.h>
41 #include <asm/mmu_context.h>
42 #include <asm/watch.h>
43 #include <asm/types.h>
44 #include <asm/stacktrace.h>
46 extern asmlinkage void handle_int(void);
47 extern asmlinkage void handle_tlbm(void);
48 extern asmlinkage void handle_tlbl(void);
49 extern asmlinkage void handle_tlbs(void);
50 extern asmlinkage void handle_adel(void);
51 extern asmlinkage void handle_ades(void);
52 extern asmlinkage void handle_ibe(void);
53 extern asmlinkage void handle_dbe(void);
54 extern asmlinkage void handle_sys(void);
55 extern asmlinkage void handle_bp(void);
56 extern asmlinkage void handle_ri(void);
57 extern asmlinkage void handle_cpu(void);
58 extern asmlinkage void handle_ov(void);
59 extern asmlinkage void handle_tr(void);
60 extern asmlinkage void handle_fpe(void);
61 extern asmlinkage void handle_mdmx(void);
62 extern asmlinkage void handle_watch(void);
63 extern asmlinkage void handle_mt(void);
64 extern asmlinkage void handle_dsp(void);
65 extern asmlinkage void handle_mcheck(void);
66 extern asmlinkage void handle_reserved(void);
68 extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
69 struct mips_fpu_struct *ctx);
71 void (*board_be_init)(void);
72 int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
73 void (*board_nmi_handler_setup)(void);
74 void (*board_ejtag_handler_setup)(void);
75 void (*board_bind_eic_interrupt)(int irq, int regset);
78 static void show_raw_backtrace(unsigned long reg29)
80 unsigned long *sp = (unsigned long *)reg29;
83 printk("Call Trace:");
84 #ifdef CONFIG_KALLSYMS
87 while (!kstack_end(sp)) {
89 if (__kernel_text_address(addr))
95 #ifdef CONFIG_KALLSYMS
97 static int __init set_raw_show_trace(char *str)
102 __setup("raw_show_trace", set_raw_show_trace);
105 static void show_backtrace(struct task_struct *task, struct pt_regs *regs)
107 unsigned long sp = regs->regs[29];
108 unsigned long ra = regs->regs[31];
109 unsigned long pc = regs->cp0_epc;
111 if (raw_show_trace || !__kernel_text_address(pc)) {
112 show_raw_backtrace(sp);
115 printk("Call Trace:\n");
118 pc = unwind_stack(task, &sp, pc, ra);
125 * This routine abuses get_user()/put_user() to reference pointers
126 * with at least a bit of error checking ...
128 static void show_stacktrace(struct task_struct *task, struct pt_regs *regs)
130 const int field = 2 * sizeof(unsigned long);
133 unsigned long *sp = (unsigned long *)regs->regs[29];
137 while ((unsigned long) sp & (PAGE_SIZE - 1)) {
138 if (i && ((i % (64 / field)) == 0))
145 if (__get_user(stackdata, sp++)) {
146 printk(" (Bad stack address)");
150 printk(" %0*lx", field, stackdata);
154 show_backtrace(task, regs);
157 void show_stack(struct task_struct *task, unsigned long *sp)
161 regs.regs[29] = (unsigned long)sp;
165 if (task && task != current) {
166 regs.regs[29] = task->thread.reg29;
168 regs.cp0_epc = task->thread.reg31;
170 prepare_frametrace(&regs);
173 show_stacktrace(task, &regs);
177 * The architecture-independent dump_stack generator
179 void dump_stack(void)
183 prepare_frametrace(&regs);
184 show_backtrace(current, &regs);
187 EXPORT_SYMBOL(dump_stack);
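/*
 * Dump the instruction words around the faulting epc: three words before
 * it and five after, with the faulting word itself bracketed by '<' and '>'.
 */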
189 void show_code(unsigned int *pc)
195 for(i = -3 ; i < 6 ; i++) {
197 if (__get_user(insn, pc + i)) {
198 printk(" (Bad address in epc)\n");
201 printk("%c%08x%c", (i?' ':'<'), insn, (i?' ':'>'));
205 void show_regs(struct pt_regs *regs)
207 const int field = 2 * sizeof(unsigned long);
208 unsigned int cause = regs->cp0_cause;
211 printk("Cpu %d\n", smp_processor_id());
214 * Saved main processor registers
216 for (i = 0; i < 32; ) {
220 printk(" %0*lx", field, 0UL);
221 else if (i == 26 || i == 27)
222 printk(" %*s", field, "");
224 printk(" %0*lx", field, regs->regs[i]);
231 printk("Hi : %0*lx\n", field, regs->hi);
232 printk("Lo : %0*lx\n", field, regs->lo);
235 * Saved cp0 registers
237 printk("epc : %0*lx ", field, regs->cp0_epc);
238 print_symbol("%s ", regs->cp0_epc);
239 printk(" %s\n", print_tainted());
240 printk("ra : %0*lx ", field, regs->regs[31]);
241 print_symbol("%s\n", regs->regs[31]);
243 printk("Status: %08x ", (uint32_t) regs->cp0_status);
245 if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) {
246 if (regs->cp0_status & ST0_KUO)
248 if (regs->cp0_status & ST0_IEO)
250 if (regs->cp0_status & ST0_KUP)
252 if (regs->cp0_status & ST0_IEP)
254 if (regs->cp0_status & ST0_KUC)
256 if (regs->cp0_status & ST0_IEC)
259 if (regs->cp0_status & ST0_KX)
261 if (regs->cp0_status & ST0_SX)
263 if (regs->cp0_status & ST0_UX)
265 switch (regs->cp0_status & ST0_KSU) {
270 printk("SUPERVISOR ");
279 if (regs->cp0_status & ST0_ERL)
281 if (regs->cp0_status & ST0_EXL)
283 if (regs->cp0_status & ST0_IE)
288 printk("Cause : %08x\n", cause);
290 cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
291 if (1 <= cause && cause <= 5)
292 printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
294 printk("PrId : %08x\n", read_c0_prid());
297 void show_registers(struct pt_regs *regs)
301 printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
302 current->comm, current->pid, current_thread_info(), current);
303 show_stacktrace(current, regs);
304 show_code((unsigned int *) regs->cp0_epc);
308 static DEFINE_SPINLOCK(die_lock);
310 NORET_TYPE void ATTRIB_NORET die(const char * str, struct pt_regs * regs)
312 static int die_counter;
313 #ifdef CONFIG_MIPS_MT_SMTC
314 unsigned long dvpret = dvpe();
315 #endif /* CONFIG_MIPS_MT_SMTC */
318 spin_lock_irq(&die_lock);
320 #ifdef CONFIG_MIPS_MT_SMTC
321 mips_mt_regdump(dvpret);
322 #endif /* CONFIG_MIPS_MT_SMTC */
323 printk("%s[#%d]:\n", str, ++die_counter);
324 show_registers(regs);
325 spin_unlock_irq(&die_lock);
328 panic("Fatal exception in interrupt");
331 printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
333 panic("Fatal exception");
339 extern const struct exception_table_entry __start___dbe_table[];
340 extern const struct exception_table_entry __stop___dbe_table[];
342 void __declare_dbe_table(void)
344 __asm__ __volatile__(
345 ".section\t__dbe_table,\"a\"\n\t"
350 /* Given an address, look for it in the exception tables. */
351 static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
353 const struct exception_table_entry *e;
355 e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
357 e = search_module_dbetables(addr);
361 asmlinkage void do_be(struct pt_regs *regs)
363 const int field = 2 * sizeof(unsigned long);
364 const struct exception_table_entry *fixup = NULL;
365 int data = regs->cp0_cause & 4;
366 int action = MIPS_BE_FATAL;
368 /* XXX For now. Fixme, this searches the wrong table ... */
369 if (data && !user_mode(regs))
370 fixup = search_dbe_tables(exception_epc(regs));
373 action = MIPS_BE_FIXUP;
375 if (board_be_handler)
376 action = board_be_handler(regs, fixup != 0);
379 case MIPS_BE_DISCARD:
383 regs->cp0_epc = fixup->nextinsn;
392 * Assume it would be too dangerous to continue ...
394 printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
395 data ? "Data" : "Instruction",
396 field, regs->cp0_epc, field, regs->regs[31]);
397 die_if_kernel("Oops", regs);
398 force_sig(SIGBUS, current);
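/*
 * Illustrative sketch, not part of the original file: a platform that
 * wants to intercept bus errors points board_be_handler at its own
 * routine, typically from its setup code. The names below are
 * hypothetical; the MIPS_BE_* return codes are the ones do_be() above
 * dispatches on (discard the access, apply an exception-table fixup, or
 * treat the error as fatal).
 *
 *	static int example_be_handler(struct pt_regs *regs, int is_fixup)
 *	{
 *		return is_fixup ? MIPS_BE_FIXUP : MIPS_BE_FATAL;
 *	}
 *
 *	board_be_handler = example_be_handler;
 */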
401 static inline int get_insn_opcode(struct pt_regs *regs, unsigned int *opcode)
403 unsigned int __user *epc;
405 epc = (unsigned int __user *) regs->cp0_epc +
406 ((regs->cp0_cause & CAUSEF_BD) != 0);
407 if (!get_user(*opcode, epc))
410 force_sig(SIGSEGV, current);
418 #define OPCODE 0xfc000000
419 #define BASE 0x03e00000
420 #define RT 0x001f0000
421 #define OFFSET 0x0000ffff
422 #define LL 0xc0000000
423 #define SC 0xe0000000
424 #define SPEC3 0x7c000000
425 #define RD 0x0000f800
426 #define FUNC 0x0000003f
427 #define RDHWR 0x0000003b
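/*
 * Field layout decoded by the masks above: opcode in bits 31:26, base
 * register in 25:21, rt in 20:16 and the signed offset in 15:0 for the
 * I-type ll/sc encodings; RD (15:11) and FUNC (5:0) are used for the
 * SPEC3 rdhwr encoding.
 */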
430 * The ll_bit is cleared by r*_switch.S
433 unsigned long ll_bit;
435 static struct task_struct *ll_task = NULL;
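/*
 * Emulation protocol: simulate_ll() records the link in ll_bit/ll_task
 * when it performs the load, and simulate_sc() lets the store succeed
 * only while that link is still held by the current task; otherwise the
 * sc is reported as failed so user space retries its ll/sc loop.
 */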
437 static inline void simulate_ll(struct pt_regs *regs, unsigned int opcode)
439 unsigned long value, __user *vaddr;
444 * Analyse the ll instruction that just caused an RI exception
445 * and put the referenced address into vaddr.
448 /* sign extend offset */
449 offset = opcode & OFFSET;
453 vaddr = (unsigned long __user *)
454 ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
456 if ((unsigned long)vaddr & 3) {
460 if (get_user(value, vaddr)) {
467 if (ll_task == NULL || ll_task == current) {
476 compute_return_epc(regs);
478 regs->regs[(opcode & RT) >> 16] = value;
483 force_sig(signal, current);
486 static inline void simulate_sc(struct pt_regs *regs, unsigned int opcode)
488 unsigned long __user *vaddr;
494 * Analyse the sc instruction that just caused an RI exception
495 * and put the referenced address into vaddr.
498 /* sign extend offset */
499 offset = opcode & OFFSET;
503 vaddr = (unsigned long __user *)
504 ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
505 reg = (opcode & RT) >> 16;
507 if ((unsigned long)vaddr & 3) {
514 if (ll_bit == 0 || ll_task != current) {
515 compute_return_epc(regs);
523 if (put_user(regs->regs[reg], vaddr)) {
528 compute_return_epc(regs);
534 force_sig(signal, current);
538 * ll uses the opcode of lwc0 and sc uses the opcode of swc0. That is, both
539 * opcodes are supposed to result in coprocessor unusable exceptions if
540 * executed on ll/sc-less processors. That's the theory. In practice a
541 * few processors such as NEC's VR4100 throw reserved instruction exceptions
542 * instead, so we're doing the emulation thing in both exception handlers.
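 *
 * A typical user-space sequence that ends up being emulated here is the
 * usual ll/sc retry loop (sketch only, register names arbitrary):
 *
 *	1:	ll	t0, 0(a0)
 *		addu	t0, t0, 1
 *		sc	t0, 0(a0)
 *		beqz	t0, 1b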
544 static inline int simulate_llsc(struct pt_regs *regs)
548 if (unlikely(get_insn_opcode(regs, &opcode)))
551 if ((opcode & OPCODE) == LL) {
552 simulate_ll(regs, opcode);
555 if ((opcode & OPCODE) == SC) {
556 simulate_sc(regs, opcode);
560 return -EFAULT; /* Strange things going on ... */
564 * Simulate trapping 'rdhwr' instructions to provide user accessible
565 * registers not implemented in hardware. The only current use of this
566 * is the thread area pointer.
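 *
 * Typically user space reads the TLS pointer with something like
 * "rdhwr $3, $29"; on CPUs without a hardware rdhwr this raises a
 * reserved instruction exception and the value is supplied from
 * ti->tp_value instead.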
568 static inline int simulate_rdhwr(struct pt_regs *regs)
570 struct thread_info *ti = task_thread_info(current);
573 if (unlikely(get_insn_opcode(regs, &opcode)))
576 if (unlikely(compute_return_epc(regs)))
579 if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
580 int rd = (opcode & RD) >> 11;
581 int rt = (opcode & RT) >> 16;
584 regs->regs[rt] = ti->tp_value;
595 asmlinkage void do_ov(struct pt_regs *regs)
599 die_if_kernel("Integer overflow", regs);
601 info.si_code = FPE_INTOVF;
602 info.si_signo = SIGFPE;
604 info.si_addr = (void __user *) regs->cp0_epc;
605 force_sig_info(SIGFPE, &info, current);
609 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
611 asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
613 die_if_kernel("FP exception in kernel code", regs);
615 if (fcr31 & FPU_CSR_UNI_X) {
620 #ifdef CONFIG_PREEMPT
621 if (!is_fpu_owner()) {
622 /* We might lose fpu before disabling preempt... */
624 BUG_ON(!used_math());
629 * Unimplemented operation exception. If we've got the full
630 * software emulator on-board, let's use it...
632 * Force FPU to dump state into task/thread context. We're
633 * moving a lot of data here for what is probably a single
634 * instruction, but the alternative is to pre-decode the FP
635 * register operands before invoking the emulator, which seems
636 * a bit extreme for what should be an infrequent event.
639 /* Ensure 'resume' does not overwrite the saved FP context again. */
644 /* Run the emulator */
645 sig = fpu_emulator_cop1Handler (regs, &current->thread.fpu);
649 own_fpu(); /* Using the FPU again. */
651 * We can't allow the emulated instruction to leave any of
652 * the cause bit set in $fcr31.
654 current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
656 /* Restore the hardware register state */
661 /* If something went wrong, signal */
663 force_sig(sig, current);
668 force_sig(SIGFPE, current);
671 asmlinkage void do_bp(struct pt_regs *regs)
673 unsigned int opcode, bcode;
676 die_if_kernel("Break instruction in kernel code", regs);
678 if (get_insn_opcode(regs, &opcode))
682 * There is an ancient bug in MIPS assemblers: the break
683 * code starts at bit 16 instead of at bit 6 in the opcode.
684 * Gas is bug-compatible, but not always, grrr...
685 * We handle both cases with a simple heuristic. --macro
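 *
 * For example, a "break 7" (BRK_DIVZERO) assembled with the code in the
 * low field yields bcode == 7 here, which the heuristic then shifts up
 * by 10 bits so that it matches the BRK_DIVZERO << 10 case either way.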
687 bcode = ((opcode >> 6) & ((1 << 20) - 1));
688 if (bcode < (1 << 10))
692 * (A short test says that IRIX 5.3 sends SIGTRAP for all break
693 * insns, even for break codes that indicate arithmetic failures.
695 * But should we continue the brokenness??? --macro
698 case BRK_OVERFLOW << 10:
699 case BRK_DIVZERO << 10:
700 if (bcode == (BRK_DIVZERO << 10))
701 info.si_code = FPE_INTDIV;
703 info.si_code = FPE_INTOVF;
704 info.si_signo = SIGFPE;
706 info.si_addr = (void __user *) regs->cp0_epc;
707 force_sig_info(SIGFPE, &info, current);
710 force_sig(SIGTRAP, current);
714 asmlinkage void do_tr(struct pt_regs *regs)
716 unsigned int opcode, tcode = 0;
719 die_if_kernel("Trap instruction in kernel code", regs);
721 if (get_insn_opcode(regs, &opcode))
724 /* Immediate versions don't provide a code. */
725 if (!(opcode & OPCODE))
726 tcode = ((opcode >> 6) & ((1 << 10) - 1));
729 * (A short test says that IRIX 5.3 sends SIGTRAP for all trap
730 * insns, even for trap codes that indicate arithmetic failures.
732 * But should we continue the brokenness??? --macro
737 if (tcode == BRK_DIVZERO)
738 info.si_code = FPE_INTDIV;
740 info.si_code = FPE_INTOVF;
741 info.si_signo = SIGFPE;
743 info.si_addr = (void __user *) regs->cp0_epc;
744 force_sig_info(SIGFPE, &info, current);
747 force_sig(SIGTRAP, current);
751 asmlinkage void do_ri(struct pt_regs *regs)
753 die_if_kernel("Reserved instruction in kernel code", regs);
756 if (!simulate_llsc(regs))
759 if (!simulate_rdhwr(regs))
762 force_sig(SIGILL, current);
765 asmlinkage void do_cpu(struct pt_regs *regs)
769 die_if_kernel("do_cpu invoked from kernel context!", regs);
771 cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
776 if (!simulate_llsc(regs))
779 if (!simulate_rdhwr(regs))
788 if (used_math()) { /* Using the FPU again. */
790 } else { /* First time FPU user. */
798 int sig = fpu_emulator_cop1Handler(regs,
799 &current->thread.fpu);
801 force_sig(sig, current);
802 #ifdef CONFIG_MIPS_MT_FPAFF
805 * MIPS MT processors may have fewer FPU contexts
806 * than CPU threads. If we've emulated more than
807 * some threshold number of instructions, force
808 * migration to a "CPU" that has FP support.
810 if(mt_fpemul_threshold > 0
811 && ((current->thread.emulated_fp++
812 > mt_fpemul_threshold))) {
814 * If there's no FPU present, or if the
815 * application has already restricted
816 * the allowed set to exclude any CPUs
817 * with FPUs, we'll skip the procedure.
819 if (cpus_intersects(current->cpus_allowed,
824 current->thread.user_cpus_allowed,
826 set_cpus_allowed(current, tmask);
827 current->thread.mflags |= MF_FPUBOUND;
831 #endif /* CONFIG_MIPS_MT_FPAFF */
838 die_if_kernel("do_cpu invoked from kernel context!", regs);
842 force_sig(SIGILL, current);
845 asmlinkage void do_mdmx(struct pt_regs *regs)
847 force_sig(SIGILL, current);
850 asmlinkage void do_watch(struct pt_regs *regs)
853 * We use the watch exception where available to detect stack overflows.
858 panic("Caught WATCH exception - probably caused by stack overflow.");
861 asmlinkage void do_mcheck(struct pt_regs *regs)
863 const int field = 2 * sizeof(unsigned long);
864 int multi_match = regs->cp0_status & ST0_TS;
869 printk("Index : %0x\n", read_c0_index());
870 printk("Pagemask: %0x\n", read_c0_pagemask());
871 printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
872 printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
873 printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
878 show_code((unsigned int *) regs->cp0_epc);
881 * Some chips may have other causes of machine check (e.g. SB1
884 panic("Caught Machine Check exception - %scaused by multiple "
885 "matching entries in the TLB.",
886 (multi_match) ? "" : "not ");
889 asmlinkage void do_mt(struct pt_regs *regs)
893 subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
894 >> VPECONTROL_EXCPT_SHIFT;
897 printk(KERN_DEBUG "Thread Underflow\n");
900 printk(KERN_DEBUG "Thread Overflow\n");
903 printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
906 printk(KERN_DEBUG "Gating Storage Exception\n");
909 printk(KERN_DEBUG "YIELD Scheduler Exception\n");
912 printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
915 printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
919 die_if_kernel("MIPS MT Thread exception in kernel", regs);
921 force_sig(SIGILL, current);
925 asmlinkage void do_dsp(struct pt_regs *regs)
928 panic("Unexpected DSP exception\n");
930 force_sig(SIGILL, current);
933 asmlinkage void do_reserved(struct pt_regs *regs)
936 * Game over - no way to handle this if it ever occurs. Most probably
937 * caused by a new unknown cpu type or after another deadly
938 * hard/software error.
941 panic("Caught reserved exception %ld - should not happen.",
942 (regs->cp0_cause & 0x7f) >> 2);
945 asmlinkage void do_default_vi(struct pt_regs *regs)
948 panic("Caught unexpected vectored interrupt.");
952 * Some MIPS CPUs can enable/disable cache parity detection, but they do it in different ways.
955 static inline void parity_protection_init(void)
957 switch (current_cpu_data.cputype) {
961 write_c0_ecc(0x80000000);
962 back_to_back_c0_hazard();
963 /* Set the PE bit (bit 31) in the c0_errctl register. */
964 printk(KERN_INFO "Cache parity protection %sabled\n",
965 (read_c0_ecc() & 0x80000000) ? "en" : "dis");
969 /* Clear the DE bit (bit 16) in the c0_status register. */
970 printk(KERN_INFO "Enabling cache parity protection for "
971 "MIPS 20KC/25KF CPUs.\n");
972 clear_c0_status(ST0_DE);
979 asmlinkage void cache_parity_error(void)
981 const int field = 2 * sizeof(unsigned long);
982 unsigned int reg_val;
984 /* For the moment, report the problem and hang. */
985 printk("Cache error exception:\n");
986 printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
987 reg_val = read_c0_cacheerr();
988 printk("c0_cacheerr == %08x\n", reg_val);
990 printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
991 reg_val & (1<<30) ? "secondary" : "primary",
992 reg_val & (1<<31) ? "data" : "insn");
993 printk("Error bits: %s%s%s%s%s%s%s\n",
994 reg_val & (1<<29) ? "ED " : "",
995 reg_val & (1<<28) ? "ET " : "",
996 reg_val & (1<<26) ? "EE " : "",
997 reg_val & (1<<25) ? "EB " : "",
998 reg_val & (1<<24) ? "EI " : "",
999 reg_val & (1<<23) ? "E1 " : "",
1000 reg_val & (1<<22) ? "E0 " : "");
1001 printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));
1003 #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
1004 if (reg_val & (1<<22))
1005 printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());
1007 if (reg_val & (1<<23))
1008 printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
1011 panic("Can't handle the cache error!");
1015 * SDBBP EJTAG debug exception handler.
1016 * We skip the instruction and return to the next instruction.
1018 void ejtag_exception_handler(struct pt_regs *regs)
1020 const int field = 2 * sizeof(unsigned long);
1021 unsigned long depc, old_epc;
1024 printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
1025 depc = read_c0_depc();
1026 debug = read_c0_debug();
1027 printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
1028 if (debug & 0x80000000) {
1030 * In branch delay slot.
1031 * We cheat a little bit here and use EPC to calculate the
1032 * debug return address (DEPC). EPC is restored after the calculation.
1035 old_epc = regs->cp0_epc;
1036 regs->cp0_epc = depc;
1037 __compute_return_epc(regs);
1038 depc = regs->cp0_epc;
1039 regs->cp0_epc = old_epc;
1042 write_c0_depc(depc);
1045 printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
1046 write_c0_debug(debug | 0x100);
1051 * NMI exception handler.
1053 void nmi_exception_handler(struct pt_regs *regs)
1055 #ifdef CONFIG_MIPS_MT_SMTC
1056 unsigned long dvpret = dvpe();
1058 printk("NMI taken!!!!\n");
1059 mips_mt_regdump(dvpret);
1062 printk("NMI taken!!!!\n");
1063 #endif /* CONFIG_MIPS_MT_SMTC */
1068 #define VECTORSPACING 0x100 /* for EI/VI mode */
1070 unsigned long ebase;
1071 unsigned long exception_handlers[32];
1072 unsigned long vi_handlers[64];
1075 * As a side effect of the way this is implemented we're limited
1076 * to interrupt handlers in the address range from
1077 * KSEG0 <= x < KSEG0 + 256mb on the Nevada. Oh well ...
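 *
 * The 256MB limit comes from the "j" instruction written below: its
 * 26-bit target field, shifted left by 2, can only reach addresses
 * within the current 256MB segment.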
1079 void *set_except_vector(int n, void *addr)
1081 unsigned long handler = (unsigned long) addr;
1082 unsigned long old_handler = exception_handlers[n];
1084 exception_handlers[n] = handler;
1085 if (n == 0 && cpu_has_divec) {
1086 *(volatile u32 *)(ebase + 0x200) = 0x08000000 |
1087 (0x03ffffff & (handler >> 2));
1088 flush_icache_range(ebase + 0x200, ebase + 0x204);
1090 return (void *)old_handler;
1093 #ifdef CONFIG_CPU_MIPSR2_SRS
1095 * MIPSR2 shadow register set allocation
1099 static struct shadow_registers {
1101 * Number of shadow register sets supported
1103 unsigned long sr_supported;
1105 * Bitmap of allocated shadow registers
1107 unsigned long sr_allocated;
1110 static void mips_srs_init(void)
1112 shadow_registers.sr_supported = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
1113 printk(KERN_INFO "%d MIPSR2 register sets available\n",
1114 shadow_registers.sr_supported);
1115 shadow_registers.sr_allocated = 1; /* Set 0 used by kernel */
1118 int mips_srs_max(void)
1120 return shadow_registers.sr_supported;
1123 int mips_srs_alloc(void)
1125 struct shadow_registers *sr = &shadow_registers;
1129 set = find_first_zero_bit(&sr->sr_allocated, sr->sr_supported);
1130 if (set >= sr->sr_supported)
1133 if (test_and_set_bit(set, &sr->sr_allocated))
1139 void mips_srs_free(int set)
1141 struct shadow_registers *sr = &shadow_registers;
1143 clear_bit(set, &sr->sr_allocated);
1146 static void *set_vi_srs_handler(int n, void *addr, int srs)
1148 unsigned long handler;
1149 unsigned long old_handler = vi_handlers[n];
1153 if (!cpu_has_veic && !cpu_has_vint)
1157 handler = (unsigned long) do_default_vi;
1160 handler = (unsigned long) addr;
1161 vi_handlers[n] = (unsigned long) addr;
1163 b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
1165 if (srs >= mips_srs_max())
1166 panic("Shadow register set %d not supported", srs);
1169 if (board_bind_eic_interrupt)
1170 board_bind_eic_interrupt (n, srs);
1171 } else if (cpu_has_vint) {
1172 /* SRSMap is only defined if shadow sets are implemented */
1173 if (mips_srs_max() > 1)
1174 change_c0_srsmap (0xf << n*4, srs << n*4);
1179 * If no shadow set is selected then use the default handler
1180 * that does normal register saving and a standard interrupt exit
1183 extern char except_vec_vi, except_vec_vi_lui;
1184 extern char except_vec_vi_ori, except_vec_vi_end;
1185 #ifdef CONFIG_MIPS_MT_SMTC
1187 * We need to provide the SMTC vectored interrupt handler
1188 * not only with the address of the handler, but with the
1189 * Status.IM bit to be masked before going there.
1191 extern char except_vec_vi_mori;
1192 const int mori_offset = &except_vec_vi_mori - &except_vec_vi;
1193 #endif /* CONFIG_MIPS_MT_SMTC */
1194 const int handler_len = &except_vec_vi_end - &except_vec_vi;
1195 const int lui_offset = &except_vec_vi_lui - &except_vec_vi;
1196 const int ori_offset = &except_vec_vi_ori - &except_vec_vi;
1198 if (handler_len > VECTORSPACING) {
1200 * Sigh... panicking won't help as the console
1201 * is probably not configured :(
1203 panic ("VECTORSPACING too small");
1206 memcpy (b, &except_vec_vi, handler_len);
1207 #ifdef CONFIG_MIPS_MT_SMTC
1209 printk("Vector index %d exceeds SMTC maximum\n", n);
1210 w = (u32 *)(b + mori_offset);
1211 *w = (*w & 0xffff0000) | (0x100 << n);
1212 #endif /* CONFIG_MIPS_MT_SMTC */
1213 w = (u32 *)(b + lui_offset);
1214 *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
1215 w = (u32 *)(b + ori_offset);
1216 *w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
1217 flush_icache_range((unsigned long)b, (unsigned long)(b+handler_len));
1221 * In other cases jump directly to the interrupt handler.
1223 * It is the handler's responsibility to save registers if required
1224 * (e.g. hi/lo) and to return from the exception using "eret".
1227 *w++ = 0x08000000 | (((u32)handler >> 2) & 0x03ffffff); /* j handler */
1229 flush_icache_range((unsigned long)b, (unsigned long)(b+8));
1232 return (void *)old_handler;
1235 void *set_vi_handler(int n, void *addr)
1237 return set_vi_srs_handler(n, addr, 0);
1242 static inline void mips_srs_init(void)
1246 #endif /* CONFIG_CPU_MIPSR2_SRS */
1249 * This is used by native signal handling
1251 asmlinkage int (*save_fp_context)(struct sigcontext *sc);
1252 asmlinkage int (*restore_fp_context)(struct sigcontext *sc);
1254 extern asmlinkage int _save_fp_context(struct sigcontext *sc);
1255 extern asmlinkage int _restore_fp_context(struct sigcontext *sc);
1257 extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc);
1258 extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc);
1261 static int smp_save_fp_context(struct sigcontext *sc)
1264 ? _save_fp_context(sc)
1265 : fpu_emulator_save_context(sc);
1268 static int smp_restore_fp_context(struct sigcontext *sc)
1271 ? _restore_fp_context(sc)
1272 : fpu_emulator_restore_context(sc);
1276 static inline void signal_init(void)
1279 /* For now just do the cpu_has_fpu check when the functions are invoked */
1280 save_fp_context = smp_save_fp_context;
1281 restore_fp_context = smp_restore_fp_context;
1284 save_fp_context = _save_fp_context;
1285 restore_fp_context = _restore_fp_context;
1287 save_fp_context = fpu_emulator_save_context;
1288 restore_fp_context = fpu_emulator_restore_context;
1293 #ifdef CONFIG_MIPS32_COMPAT
1296 * This is used by 32-bit signal stuff on the 64-bit kernel
1298 asmlinkage int (*save_fp_context32)(struct sigcontext32 *sc);
1299 asmlinkage int (*restore_fp_context32)(struct sigcontext32 *sc);
1301 extern asmlinkage int _save_fp_context32(struct sigcontext32 *sc);
1302 extern asmlinkage int _restore_fp_context32(struct sigcontext32 *sc);
1304 extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 *sc);
1305 extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 *sc);
1307 static inline void signal32_init(void)
1310 save_fp_context32 = _save_fp_context32;
1311 restore_fp_context32 = _restore_fp_context32;
1313 save_fp_context32 = fpu_emulator_save_context32;
1314 restore_fp_context32 = fpu_emulator_restore_context32;
1319 extern void cpu_cache_init(void);
1320 extern void tlb_init(void);
1321 extern void flush_tlb_handlers(void);
1323 void __init per_cpu_trap_init(void)
1325 unsigned int cpu = smp_processor_id();
1326 unsigned int status_set = ST0_CU0;
1327 #ifdef CONFIG_MIPS_MT_SMTC
1328 int secondaryTC = 0;
1329 int bootTC = (cpu == 0);
1332 * Only do per_cpu_trap_init() for the first TC of each VPE.
1333 * Note that this hack assumes that the SMTC init code
1334 * assigns TCs consecutively and in ascending order.
1337 if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
1338 ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id))
1340 #endif /* CONFIG_MIPS_MT_SMTC */
1343 * Disable coprocessors and select 32-bit or 64-bit addressing
1344 * and the 16/32 or 32/32 FPR register model. Reset the BEV
1345 * flag that some firmware may have left set and the TS bit (for
1346 * IP27). Set XX for ISA IV code to work.
1349 status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
1351 if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV)
1352 status_set |= ST0_XX;
1353 change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
1357 set_c0_status(ST0_MX);
1359 #ifdef CONFIG_CPU_MIPSR2
1360 write_c0_hwrena (0x0000000f); /* Allow rdhwr to all registers */
1363 #ifdef CONFIG_MIPS_MT_SMTC
1365 #endif /* CONFIG_MIPS_MT_SMTC */
1368 * Interrupt handling.
1370 if (cpu_has_veic || cpu_has_vint) {
1371 write_c0_ebase (ebase);
1372 /* Setting vector spacing enables EI/VI mode */
1373 change_c0_intctl (0x3e0, VECTORSPACING);
1375 if (cpu_has_divec) {
1376 if (cpu_has_mipsmt) {
1377 unsigned int vpflags = dvpe();
1378 set_c0_cause(CAUSEF_IV);
1381 set_c0_cause(CAUSEF_IV);
1383 #ifdef CONFIG_MIPS_MT_SMTC
1385 #endif /* CONFIG_MIPS_MT_SMTC */
1387 cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
1388 TLBMISS_HANDLER_SETUP();
1390 atomic_inc(&init_mm.mm_count);
1391 current->active_mm = &init_mm;
1392 BUG_ON(current->mm);
1393 enter_lazy_tlb(&init_mm, current);
1395 #ifdef CONFIG_MIPS_MT_SMTC
1397 #endif /* CONFIG_MIPS_MT_SMTC */
1400 #ifdef CONFIG_MIPS_MT_SMTC
1402 #endif /* CONFIG_MIPS_MT_SMTC */
1405 /* Install CPU exception handler */
1406 void __init set_handler (unsigned long offset, void *addr, unsigned long size)
1408 memcpy((void *)(ebase + offset), addr, size);
1409 flush_icache_range(ebase + offset, ebase + offset + size);
1412 /* Install uncached CPU exception handler */
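/*
 * Used for vectors that have to run from uncached space, most notably
 * the cache error handler, where the caches themselves cannot be
 * trusted.
 */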
1413 void __init set_uncached_handler (unsigned long offset, void *addr, unsigned long size)
1416 unsigned long uncached_ebase = KSEG1ADDR(ebase);
1419 unsigned long uncached_ebase = TO_UNCAC(ebase);
1422 memcpy((void *)(uncached_ebase + offset), addr, size);
1425 void __init trap_init(void)
1427 extern char except_vec3_generic, except_vec3_r4000;
1428 extern char except_vec4;
1431 if (cpu_has_veic || cpu_has_vint)
1432 ebase = (unsigned long) alloc_bootmem_low_pages (0x200 + VECTORSPACING*64);
1438 per_cpu_trap_init();
1441 * Copy the generic exception handlers to their final destination.
1442 * This will be overridden later as suitable for a particular
1445 set_handler(0x180, &except_vec3_generic, 0x80);
1448 * Set up default vectors
1450 for (i = 0; i <= 31; i++)
1451 set_except_vector(i, handle_reserved);
1454 * Copy the EJTAG debug exception vector handler code to its final
1457 if (cpu_has_ejtag && board_ejtag_handler_setup)
1458 board_ejtag_handler_setup ();
1461 * Only some CPUs have the watch exceptions.
1464 set_except_vector(23, handle_watch);
1467 * Initialise interrupt handlers
1469 if (cpu_has_veic || cpu_has_vint) {
1470 int nvec = cpu_has_veic ? 64 : 8;
1471 for (i = 0; i < nvec; i++)
1472 set_vi_handler(i, NULL);
1474 else if (cpu_has_divec)
1475 set_handler(0x200, &except_vec4, 0x8);
1478 * Some CPUs can enable/disable cache parity detection, but they do
1479 * it in different ways.
1481 parity_protection_init();
1484 * The Data Bus Errors / Instruction Bus Errors are signaled
1485 * by external hardware. Therefore these two exceptions
1486 * may have board-specific handlers.
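 *
 * The vector numbers used below are the architectural ExcCode values
 * from the CP0 Cause register: 0 interrupt, 1-3 TLB modified/load/store,
 * 4/5 address error on load/store, 6/7 instruction/data bus error,
 * 8 syscall, 9 breakpoint, 10 reserved instruction, 11 coprocessor
 * unusable, 12 integer overflow, 13 trap.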
1491 set_except_vector(0, handle_int);
1492 set_except_vector(1, handle_tlbm);
1493 set_except_vector(2, handle_tlbl);
1494 set_except_vector(3, handle_tlbs);
1496 set_except_vector(4, handle_adel);
1497 set_except_vector(5, handle_ades);
1499 set_except_vector(6, handle_ibe);
1500 set_except_vector(7, handle_dbe);
1502 set_except_vector(8, handle_sys);
1503 set_except_vector(9, handle_bp);
1504 set_except_vector(10, handle_ri);
1505 set_except_vector(11, handle_cpu);
1506 set_except_vector(12, handle_ov);
1507 set_except_vector(13, handle_tr);
1509 if (current_cpu_data.cputype == CPU_R6000 ||
1510 current_cpu_data.cputype == CPU_R6000A) {
1512 * The R6000 is the only R-series CPU that features a machine
1513 * check exception (similar to the R4000 cache error) and
1514 * unaligned ldc1/sdc1 exception. The handlers have not been
1515 * written yet. Well, anyway there is no R6000 machine on the
1516 * current list of targets for Linux/MIPS.
1517 * (Duh, crap, there is someone with a triple R6k machine)
1519 //set_except_vector(14, handle_mc);
1520 //set_except_vector(15, handle_ndc);
1524 if (board_nmi_handler_setup)
1525 board_nmi_handler_setup();
1527 if (cpu_has_fpu && !cpu_has_nofpuex)
1528 set_except_vector(15, handle_fpe);
1530 set_except_vector(22, handle_mdmx);
1533 set_except_vector(24, handle_mcheck);
1536 set_except_vector(25, handle_mt);
1539 set_except_vector(26, handle_dsp);
1542 /* Special exception: R4[04]00 also uses the divec space. */
1543 memcpy((void *)(CAC_BASE + 0x180), &except_vec3_r4000, 0x100);
1544 else if (cpu_has_4kex)
1545 memcpy((void *)(CAC_BASE + 0x180), &except_vec3_generic, 0x80);
1547 memcpy((void *)(CAC_BASE + 0x080), &except_vec3_generic, 0x80);
1550 #ifdef CONFIG_MIPS32_COMPAT
1554 flush_icache_range(ebase, ebase + 0x400);
1555 flush_tlb_handlers();