Commit | Line | Data |
---|---|---|
6b002230 PM |
1 | /* |
2 | * 'traps.c' handles hardware traps and faults after we have saved some | |
3 | * state in 'entry.S'. | |
1da177e4 LT |
4 | * |
5 | * SuperH version: Copyright (C) 1999 Niibe Yutaka | |
6 | * Copyright (C) 2000 Philipp Rumpf | |
7 | * Copyright (C) 2000 David Howells | |
6b002230 PM |
8 | * Copyright (C) 2002 - 2006 Paul Mundt |
9 | * | |
10 | * This file is subject to the terms and conditions of the GNU General Public | |
11 | * License. See the file "COPYING" in the main directory of this archive | |
12 | * for more details. | |
1da177e4 | 13 | */ |
1da177e4 | 14 | #include <linux/kernel.h> |
1da177e4 | 15 | #include <linux/ptrace.h> |
1da177e4 | 16 | #include <linux/init.h> |
1da177e4 LT |
17 | #include <linux/spinlock.h> |
18 | #include <linux/module.h> | |
19 | #include <linux/kallsyms.h> | |
1f666587 | 20 | #include <linux/io.h> |
1da177e4 LT |
21 | #include <asm/system.h> |
22 | #include <asm/uaccess.h> | |
1da177e4 LT |
23 | |
/*
 * CHK_REMOTE_DEBUG(): hand a kernel-mode trap to the KGDB stub when a
 * remote debugger has installed a hook; expands to nothing when KGDB
 * support is not configured.
 */
#ifdef CONFIG_SH_KGDB
#include <asm/kgdb.h>
#define CHK_REMOTE_DEBUG(regs)			\
{						\
	if (kgdb_debug_hook && !user_mode(regs))\
		(*kgdb_debug_hook)(regs);	\
}
#else
#define CHK_REMOTE_DEBUG(regs)
#endif

/*
 * Trap vector numbers differ between SH-2 and later parts; SH-2A
 * additionally has dedicated divide-error vectors.
 */
#ifdef CONFIG_CPU_SH2
# define TRAP_RESERVED_INST	4
# define TRAP_ILLEGAL_SLOT_INST	6
# define TRAP_ADDRESS_ERROR	9
# ifdef CONFIG_CPU_SH2A
# define TRAP_DIVZERO_ERROR	17
# define TRAP_DIVOVF_ERROR	18
# endif
#else
#define TRAP_RESERVED_INST	12
#define TRAP_ILLEGAL_SLOT_INST	13
#endif
47 | ||
6b002230 PM |
48 | static void dump_mem(const char *str, unsigned long bottom, unsigned long top) |
49 | { | |
50 | unsigned long p; | |
51 | int i; | |
52 | ||
53 | printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top); | |
54 | ||
55 | for (p = bottom & ~31; p < top; ) { | |
56 | printk("%04lx: ", p & 0xffff); | |
57 | ||
58 | for (i = 0; i < 8; i++, p += 4) { | |
59 | unsigned int val; | |
60 | ||
61 | if (p < bottom || p >= top) | |
62 | printk(" "); | |
63 | else { | |
64 | if (__get_user(val, (unsigned int __user *)p)) { | |
65 | printk("\n"); | |
66 | return; | |
67 | } | |
68 | printk("%08x ", val); | |
69 | } | |
70 | } | |
71 | printk("\n"); | |
72 | } | |
73 | } | |
1da177e4 | 74 | |
DEFINE_SPINLOCK(die_lock);

/*
 * Terminal error path: dump state for a fatal fault and kill the
 * current task.  die_lock serializes concurrent oopses so their
 * output does not interleave.
 */
void die(const char * str, struct pt_regs * regs, long err)
{
	static int die_counter;	/* distinguishes repeated oopses in the log */

	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);

	printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);

	CHK_REMOTE_DEBUG(regs);
	print_modules();
	show_regs(regs);

	printk("Process: %s (pid: %d, stack limit = %p)\n",
	       current->comm, current->pid, task_stack_page(current) + 1);

	/* Only dump the stack when it is a kernel/interrupt stack. */
	if (!user_mode(regs) || in_interrupt())
		dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
			 (unsigned long)task_stack_page(current));

	bust_spinlocks(0);
	spin_unlock_irq(&die_lock);
	do_exit(SIGSEGV);	/* never returns */
}
102 | ||
6b002230 PM |
/* Fatal only if the fault happened in kernel mode. */
static inline void die_if_kernel(const char *str, struct pt_regs *regs,
				 long err)
{
	if (user_mode(regs))
		return;

	die(str, regs, err);
}
109 | ||
1da177e4 LT |
110 | /* |
111 | * try and fix up kernelspace address errors | |
112 | * - userspace errors just cause EFAULT to be returned, resulting in SEGV | |
113 | * - kernel/userspace interfaces cause a jump to an appropriate handler | |
114 | * - other kernel errors are bad | |
115 | * - return 0 if fixed-up, -EFAULT if non-fatal (to the kernel) fault | |
116 | */ | |
117 | static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err) | |
118 | { | |
6b002230 | 119 | if (!user_mode(regs)) { |
1da177e4 LT |
120 | const struct exception_table_entry *fixup; |
121 | fixup = search_exception_tables(regs->pc); | |
122 | if (fixup) { | |
123 | regs->pc = fixup->fixup; | |
124 | return 0; | |
125 | } | |
126 | die(str, regs, err); | |
127 | } | |
128 | return -EFAULT; | |
129 | } | |
130 | ||
131 | /* | |
132 | * handle an instruction that does an unaligned memory access by emulating the | |
133 | * desired behaviour | |
134 | * - note that PC _may not_ point to the faulting instruction | |
135 | * (if that instruction is in a branch delay slot) | |
136 | * - return 0 if emulation okay, -EFAULT on existential error | |
137 | */ | |
138 | static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs) | |
139 | { | |
140 | int ret, index, count; | |
141 | unsigned long *rm, *rn; | |
142 | unsigned char *src, *dst; | |
143 | ||
144 | index = (instruction>>8)&15; /* 0x0F00 */ | |
145 | rn = ®s->regs[index]; | |
146 | ||
147 | index = (instruction>>4)&15; /* 0x00F0 */ | |
148 | rm = ®s->regs[index]; | |
149 | ||
150 | count = 1<<(instruction&3); | |
151 | ||
152 | ret = -EFAULT; | |
153 | switch (instruction>>12) { | |
154 | case 0: /* mov.[bwl] to/from memory via r0+rn */ | |
155 | if (instruction & 8) { | |
156 | /* from memory */ | |
157 | src = (unsigned char*) *rm; | |
158 | src += regs->regs[0]; | |
159 | dst = (unsigned char*) rn; | |
160 | *(unsigned long*)dst = 0; | |
161 | ||
162 | #ifdef __LITTLE_ENDIAN__ | |
163 | if (copy_from_user(dst, src, count)) | |
164 | goto fetch_fault; | |
165 | ||
166 | if ((count == 2) && dst[1] & 0x80) { | |
167 | dst[2] = 0xff; | |
168 | dst[3] = 0xff; | |
169 | } | |
170 | #else | |
171 | dst += 4-count; | |
172 | ||
173 | if (__copy_user(dst, src, count)) | |
174 | goto fetch_fault; | |
175 | ||
176 | if ((count == 2) && dst[2] & 0x80) { | |
177 | dst[0] = 0xff; | |
178 | dst[1] = 0xff; | |
179 | } | |
180 | #endif | |
181 | } else { | |
182 | /* to memory */ | |
183 | src = (unsigned char*) rm; | |
184 | #if !defined(__LITTLE_ENDIAN__) | |
185 | src += 4-count; | |
186 | #endif | |
187 | dst = (unsigned char*) *rn; | |
188 | dst += regs->regs[0]; | |
189 | ||
190 | if (copy_to_user(dst, src, count)) | |
191 | goto fetch_fault; | |
192 | } | |
193 | ret = 0; | |
194 | break; | |
195 | ||
196 | case 1: /* mov.l Rm,@(disp,Rn) */ | |
197 | src = (unsigned char*) rm; | |
198 | dst = (unsigned char*) *rn; | |
199 | dst += (instruction&0x000F)<<2; | |
200 | ||
201 | if (copy_to_user(dst,src,4)) | |
202 | goto fetch_fault; | |
203 | ret = 0; | |
204 | break; | |
205 | ||
206 | case 2: /* mov.[bwl] to memory, possibly with pre-decrement */ | |
207 | if (instruction & 4) | |
208 | *rn -= count; | |
209 | src = (unsigned char*) rm; | |
210 | dst = (unsigned char*) *rn; | |
211 | #if !defined(__LITTLE_ENDIAN__) | |
212 | src += 4-count; | |
213 | #endif | |
214 | if (copy_to_user(dst, src, count)) | |
215 | goto fetch_fault; | |
216 | ret = 0; | |
217 | break; | |
218 | ||
219 | case 5: /* mov.l @(disp,Rm),Rn */ | |
220 | src = (unsigned char*) *rm; | |
221 | src += (instruction&0x000F)<<2; | |
222 | dst = (unsigned char*) rn; | |
223 | *(unsigned long*)dst = 0; | |
224 | ||
225 | if (copy_from_user(dst,src,4)) | |
226 | goto fetch_fault; | |
227 | ret = 0; | |
228 | break; | |
229 | ||
230 | case 6: /* mov.[bwl] from memory, possibly with post-increment */ | |
231 | src = (unsigned char*) *rm; | |
232 | if (instruction & 4) | |
233 | *rm += count; | |
234 | dst = (unsigned char*) rn; | |
235 | *(unsigned long*)dst = 0; | |
236 | ||
237 | #ifdef __LITTLE_ENDIAN__ | |
238 | if (copy_from_user(dst, src, count)) | |
239 | goto fetch_fault; | |
240 | ||
241 | if ((count == 2) && dst[1] & 0x80) { | |
242 | dst[2] = 0xff; | |
243 | dst[3] = 0xff; | |
244 | } | |
245 | #else | |
246 | dst += 4-count; | |
247 | ||
248 | if (copy_from_user(dst, src, count)) | |
249 | goto fetch_fault; | |
250 | ||
251 | if ((count == 2) && dst[2] & 0x80) { | |
252 | dst[0] = 0xff; | |
253 | dst[1] = 0xff; | |
254 | } | |
255 | #endif | |
256 | ret = 0; | |
257 | break; | |
258 | ||
259 | case 8: | |
260 | switch ((instruction&0xFF00)>>8) { | |
261 | case 0x81: /* mov.w R0,@(disp,Rn) */ | |
262 | src = (unsigned char*) ®s->regs[0]; | |
263 | #if !defined(__LITTLE_ENDIAN__) | |
264 | src += 2; | |
265 | #endif | |
266 | dst = (unsigned char*) *rm; /* called Rn in the spec */ | |
267 | dst += (instruction&0x000F)<<1; | |
268 | ||
269 | if (copy_to_user(dst, src, 2)) | |
270 | goto fetch_fault; | |
271 | ret = 0; | |
272 | break; | |
273 | ||
274 | case 0x85: /* mov.w @(disp,Rm),R0 */ | |
275 | src = (unsigned char*) *rm; | |
276 | src += (instruction&0x000F)<<1; | |
277 | dst = (unsigned char*) ®s->regs[0]; | |
278 | *(unsigned long*)dst = 0; | |
279 | ||
280 | #if !defined(__LITTLE_ENDIAN__) | |
281 | dst += 2; | |
282 | #endif | |
283 | ||
284 | if (copy_from_user(dst, src, 2)) | |
285 | goto fetch_fault; | |
286 | ||
287 | #ifdef __LITTLE_ENDIAN__ | |
288 | if (dst[1] & 0x80) { | |
289 | dst[2] = 0xff; | |
290 | dst[3] = 0xff; | |
291 | } | |
292 | #else | |
293 | if (dst[2] & 0x80) { | |
294 | dst[0] = 0xff; | |
295 | dst[1] = 0xff; | |
296 | } | |
297 | #endif | |
298 | ret = 0; | |
299 | break; | |
300 | } | |
301 | break; | |
302 | } | |
303 | return ret; | |
304 | ||
305 | fetch_fault: | |
306 | /* Argh. Address not only misaligned but also non-existent. | |
307 | * Raise an EFAULT and see if it's trapped | |
308 | */ | |
309 | return die_if_no_fixup("Fault in unaligned fixup", regs, 0); | |
310 | } | |
311 | ||
/*
 * emulate the instruction in the delay slot
 * - fetches the instruction from PC+2
 */
static inline int handle_unaligned_delayslot(struct pt_regs *regs)
{
	u16 instruction;

	if (copy_from_user(&instruction, (u16 *)(regs->pc+2), 2)) {
		/* the instruction-fetch faulted */
		if (user_mode(regs))
			return -EFAULT;

		/* kernel: die() does not return, so the otherwise-
		 * uninitialized 'instruction' below is never reached */
		die("delay-slot-insn faulting in handle_unaligned_delayslot", regs, 0);
	}

	return handle_unaligned_ins(instruction,regs);
}
331 | ||
/*
 * handle an instruction that does an unaligned memory access
 * - have to be careful of branch delay-slot instructions that fault
 *  SH3:
 *   - if the branch would be taken PC points to the branch
 *   - if the branch would not be taken, PC points to delay-slot
 *  SH4:
 *   - PC always points to delayed branch
 * - return 0 if handled, -EFAULT if failed (may not return if in kernel)
 */

/* Macros to determine offset from current PC for branch instructions */
/* Explicit type coercion is used to force sign extension where needed */
/* 8-bit form: sign-extend the low byte, scale by 2 (instructions are
 * 16-bit), plus 4 for the branch and its delay slot. */
#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
/* 12-bit form: left-shift by 4 puts the displacement's sign bit into
 * bit 15 of the signed short; arithmetic >>3 then yields the
 * sign-extended displacement times 2, plus 4 as above. */
#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
347 | ||
710ee0cc PM |
/*
 * XXX: SH-2A needs this too, but it needs an overhaul thanks to mixed 32-bit
 * opcodes..
 */
#ifndef CONFIG_CPU_SH2A
/* Only the first ten userspace fixups are logged (crude rate limit). */
static int handle_unaligned_notify_count = 10;

/*
 * Dispatch an unaligned-access fixup.  Branch instructions whose delay
 * slot contains the faulting access are emulated here (delay slot
 * first, then the branch's PC/PR update); plain accesses fall through
 * to handle_unaligned_ins() via the 'simple' path.
 */
static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
{
	u_int rm;
	int ret, index;

	index = (instruction>>8)&15;	/* 0x0F00 */
	rm = regs->regs[index];

	/* shout about the first ten userspace fixups */
	if (user_mode(regs) && handle_unaligned_notify_count>0) {
		handle_unaligned_notify_count--;

		printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
		       current->comm,current->pid,(u16*)regs->pc,instruction);
	}

	ret = -EFAULT;
	switch (instruction&0xF000) {
	case 0x0000:
		if (instruction==0x000B) {
			/* rts */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0)
				regs->pc = regs->pr;
		}
		else if ((instruction&0x00FF)==0x0023) {
			/* braf @Rm */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0)
				regs->pc += rm + 4;
		}
		else if ((instruction&0x00FF)==0x0003) {
			/* bsrf @Rm */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0) {
				regs->pr = regs->pc + 4;
				regs->pc += rm + 4;
			}
		}
		else {
			/* mov.[bwl] to/from memory via r0+rn */
			goto simple;
		}
		break;

	case 0x1000: /* mov.l Rm,@(disp,Rn) */
		goto simple;

	case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */
		goto simple;

	case 0x4000:
		if ((instruction&0x00FF)==0x002B) {
			/* jmp @Rm */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0)
				regs->pc = rm;
		}
		else if ((instruction&0x00FF)==0x000B) {
			/* jsr @Rm */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0) {
				regs->pr = regs->pc + 4;
				regs->pc = rm;
			}
		}
		else {
			/* mov.[bwl] to/from memory via r0+rn */
			goto simple;
		}
		break;

	case 0x5000: /* mov.l @(disp,Rm),Rn */
		goto simple;

	case 0x6000: /* mov.[bwl] from memory, possibly with post-increment */
		goto simple;

	case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */
		switch (instruction&0x0F00) {
		case 0x0100: /* mov.w R0,@(disp,Rm) */
			goto simple;
		case 0x0500: /* mov.w @(disp,Rm),R0 */
			goto simple;
		case 0x0B00: /* bf   lab - no delayslot*/
			break;
		case 0x0F00: /* bf/s lab */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0) {
				/* SR bit 0 is the T flag: branch not taken
				 * when T is set for bf/s */
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
				if ((regs->sr & 0x00000001) != 0)
					regs->pc += 4; /* next after slot */
				else
#endif
					regs->pc += SH_PC_8BIT_OFFSET(instruction);
			}
			break;
		case 0x0900: /* bt   lab - no delayslot */
			break;
		case 0x0D00: /* bt/s lab */
			ret = handle_unaligned_delayslot(regs);
			if (ret==0) {
				/* bt/s falls through when T is clear */
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
				if ((regs->sr & 0x00000001) == 0)
					regs->pc += 4; /* next after slot */
				else
#endif
					regs->pc += SH_PC_8BIT_OFFSET(instruction);
			}
			break;
		}
		break;

	case 0xA000: /* bra label */
		ret = handle_unaligned_delayslot(regs);
		if (ret==0)
			regs->pc += SH_PC_12BIT_OFFSET(instruction);
		break;

	case 0xB000: /* bsr label */
		ret = handle_unaligned_delayslot(regs);
		if (ret==0) {
			regs->pr = regs->pc + 4;
			regs->pc += SH_PC_12BIT_OFFSET(instruction);
		}
		break;
	}
	return ret;

	/* handle non-delay-slot instruction */
 simple:
	ret = handle_unaligned_ins(instruction,regs);
	if (ret==0)
		regs->pc += 2;
	return ret;
}
#endif /* CONFIG_CPU_SH2A */
1da177e4 | 492 | |
0983b318 YS |
/*
 * Fetch the exception event code saved by the low-level entry path:
 * with SR.RB register banking it is held in the banked r2, otherwise
 * the entry code leaves it in r4.
 */
#ifdef CONFIG_CPU_HAS_SR_RB
#define lookup_exception_vector(x)	\
	__asm__ __volatile__ ("stc r2_bank, %0\n\t" : "=r" ((x)))
#else
#define lookup_exception_vector(x)	\
	__asm__ __volatile__ ("mov r4, %0\n\t" : "=r" ((x)))
#endif
500 | ||
1da177e4 LT |
/*
 * Handle various address error exceptions
 *
 * For userspace faults we try to emulate the unaligned access; if that
 * fails (or on SH-2A, which has no fixup path yet) the task gets
 * SIGSEGV.  Kernel-mode faults are fixed up the same way, or are fatal.
 */
asmlinkage void do_address_error(struct pt_regs *regs,
				 unsigned long writeaccess,
				 unsigned long address)
{
	unsigned long error_code = 0;
	mm_segment_t oldfs;
#ifndef CONFIG_CPU_SH2A
	u16 instruction;
	int tmp;
#endif

	/* Intentional ifdef */
#ifdef CONFIG_CPU_HAS_SR_RB
	lookup_exception_vector(error_code);
#endif

	oldfs = get_fs();

	if (user_mode(regs)) {
		local_irq_enable();
		current->thread.error_code = error_code;
#ifdef CONFIG_CPU_SH2
		/*
		 * On the SH-2, we only have a single vector for address
		 * errors, there's no differentiating between a load error
		 * and a store error.
		 */
		current->thread.trap_no = 9;
#else
		current->thread.trap_no = (writeaccess) ? 8 : 7;
#endif

		/* bad PC is not something we can fix */
		if (regs->pc & 1)
			goto uspace_segv;

#ifndef CONFIG_CPU_SH2A
		/* temporarily switch to USER_DS to fetch the insn */
		set_fs(USER_DS);
		if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
			/* Argh. Fault on the instruction itself.
			   This should never happen non-SMP
			*/
			set_fs(oldfs);
			goto uspace_segv;
		}

		tmp = handle_unaligned_access(instruction, regs);
		set_fs(oldfs);

		if (tmp==0)
			return; /* sorted */
#endif

	uspace_segv:
		printk(KERN_NOTICE "Killing process \"%s\" due to unaligned access\n", current->comm);
		force_sig(SIGSEGV, current);
	} else {
		/* kernel mode: a misaligned PC is unrecoverable */
		if (regs->pc & 1)
			die("unaligned program counter", regs, error_code);

#ifndef CONFIG_CPU_SH2A
		set_fs(KERNEL_DS);
		if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
			/* Argh. Fault on the instruction itself.
			   This should never happen non-SMP
			*/
			set_fs(oldfs);
			die("insn faulting in do_address_error", regs, 0);
		}

		handle_unaligned_access(instruction, regs);
		set_fs(oldfs);
#else
		/* no SH-2A fixup path (mixed 32-bit opcodes) */
		printk(KERN_NOTICE "Killing process \"%s\" due to unaligned access\n", current->comm);
		force_sig(SIGSEGV, current);
#endif
	}
}
582 | ||
#ifdef CONFIG_SH_DSP
/*
 * SH-DSP support gerg@snapgear.com.
 *
 * Decide whether the opcode at regs->pc looks like a DSP instruction,
 * so the caller can enable DSP mode and retry.
 */
int is_dsp_inst(struct pt_regs *regs)
{
	unsigned short inst;

	/*
	 * Safe guard if DSP mode is already enabled or we're lacking
	 * the DSP altogether.
	 */
	if (!(cpu_data->flags & CPU_HAS_DSP) || (regs->sr & SR_DSP))
		return 0;

	/* NOTE(review): get_user()'s return value is ignored here; a
	 * faulting fetch leaves 'inst' indeterminate -- confirm this is
	 * acceptable on this path. */
	get_user(inst, ((unsigned short *) regs->pc));

	inst &= 0xf000;

	/* Check for any type of DSP or support instruction */
	if ((inst == 0xf000) || (inst == 0x4000))
		return 1;

	return 0;
}
#else
#define is_dsp_inst(regs)	(0)
#endif /* CONFIG_SH_DSP */
611 | ||
0983b318 YS |
#ifdef CONFIG_CPU_SH2A
/*
 * SH-2A integer divide trap handler.  r4 carries the trap vector
 * (divide-by-zero or divide overflow); deliver SIGFPE with a fully
 * populated siginfo to the offending task.
 */
asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
				unsigned long r6, unsigned long r7,
				struct pt_regs regs)
{
	siginfo_t info;

	current->thread.trap_no = r4;
	current->thread.error_code = 0;

	/*
	 * Zero the whole siginfo first: it lives on the kernel stack
	 * and is copied out to userspace by force_sig_info(), so any
	 * field left untouched would leak kernel stack contents.
	 */
	memset(&info, 0, sizeof(info));
	info.si_signo = SIGFPE;
	info.si_addr = (void __user *)regs.pc;

	switch (r4) {
	case TRAP_DIVZERO_ERROR:
		info.si_code = FPE_INTDIV;
		break;
	case TRAP_DIVOVF_ERROR:
		info.si_code = FPE_INTOVF;
		break;
	}

	force_sig_info(SIGFPE, &info, current);
}
#endif
634 | ||
1f666587 PM |
/* arch/sh/kernel/cpu/sh4/fpu.c */
extern int do_fpu_inst(unsigned short, struct pt_regs *);
extern asmlinkage void do_fpu_state_restore(unsigned long r4, unsigned long r5,
		unsigned long r6, unsigned long r7, struct pt_regs regs);

/*
 * Reserved-instruction trap: give the FPU emulator and then the
 * DSP-enable path a chance to claim the opcode before raising SIGILL.
 */
asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
				unsigned long r6, unsigned long r7,
				struct pt_regs regs)
{
	unsigned long error_code;
	struct task_struct *tsk = current;

#ifdef CONFIG_SH_FPU_EMU
	unsigned short inst = 0;
	int err;

	get_user(inst, (unsigned short*)regs.pc);

	err = do_fpu_inst(inst, &regs);
	if (!err) {
		/* emulated: step past the instruction */
		regs.pc += 2;
		return;
	}
	/* not a FPU inst. */
#endif

#ifdef CONFIG_SH_DSP
	/* Check if it's a DSP instruction */
	if (is_dsp_inst(&regs)) {
		/* Enable DSP mode, and restart instruction. */
		regs.sr |= SR_DSP;
		return;
	}
#endif

	lookup_exception_vector(error_code);

	local_irq_enable();
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = TRAP_RESERVED_INST;
	CHK_REMOTE_DEBUG(&regs);
	force_sig(SIGILL, tsk);
	die_if_no_fixup("reserved instruction", &regs, error_code);
}
679 | ||
#ifdef CONFIG_SH_FPU_EMU
/*
 * Emulate the branch whose delay slot we just emulated an FPU
 * instruction in: adjust PC, and for the subroutine-call forms also
 * save the return address in PR.
 *
 * Returns 0 if the branch was recognized and emulated, 1 otherwise.
 */
static int emulate_branch(unsigned short inst, struct pt_regs* regs)
{
	/*
	 * bfs: 8fxx: PC+=d*2+4;
	 * bts: 8dxx: PC+=d*2+4;
	 * bra: axxx: PC+=D*2+4;
	 * bsr: bxxx: PC+=D*2+4  after PR=PC+4;
	 * braf:0x23: PC+=Rn*2+4;
	 * bsrf:0x03: PC+=Rn*2+4 after PR=PC+4;
	 * jmp: 4x2b: PC=Rn;
	 * jsr: 4x0b: PC=Rn      after PR=PC+4;
	 * rts: 000b: PC=PR;
	 */

	/*
	 * Fix: the subroutine-call forms must save the return address
	 * in PR before the jump is taken; previously PR was never
	 * written, so emulated calls returned through a stale PR.
	 */
	if (((inst & 0xf000) == 0xb000)  ||	/* bsr */
	    ((inst & 0xf0ff) == 0x0003)  ||	/* bsrf */
	    ((inst & 0xf0ff) == 0x400b))	/* jsr */
		regs->pr = regs->pc + 4;

	if ((inst & 0xfd00) == 0x8d00) {	/* bfs/bts */
		regs->pc += SH_PC_8BIT_OFFSET(inst);
		return 0;
	}

	if ((inst & 0xe000) == 0xa000) {	/* bra/bsr */
		regs->pc += SH_PC_12BIT_OFFSET(inst);
		return 0;
	}

	if ((inst & 0xf0df) == 0x0003) {	/* braf/bsrf */
		regs->pc += regs->regs[(inst & 0x0f00) >> 8] + 4;
		return 0;
	}

	if ((inst & 0xf0df) == 0x400b) {	/* jmp/jsr */
		regs->pc = regs->regs[(inst & 0x0f00) >> 8];
		return 0;
	}

	if ((inst & 0xffff) == 0x000b) {	/* rts */
		regs->pc = regs->pr;
		return 0;
	}

	return 1;
}
#endif
722 | ||
/*
 * Illegal-slot trap: an instruction that may not occupy a branch delay
 * slot.  With FPU emulation enabled, try to emulate the slot
 * instruction and then the owning branch before raising SIGILL.
 */
asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
				unsigned long r6, unsigned long r7,
				struct pt_regs regs)
{
	unsigned long error_code;
	struct task_struct *tsk = current;
#ifdef CONFIG_SH_FPU_EMU
	unsigned short inst = 0;

	/* the slot instruction sits at pc+2, after the branch */
	get_user(inst, (unsigned short *)regs.pc + 1);
	if (!do_fpu_inst(inst, &regs)) {
		/* slot emulated; now take the branch itself */
		get_user(inst, (unsigned short *)regs.pc);
		if (!emulate_branch(inst, &regs))
			return;
		/* fault in branch.*/
	}
	/* not a FPU inst. */
#endif

	lookup_exception_vector(error_code);

	local_irq_enable();
	tsk->thread.error_code = error_code;
	/* NOTE(review): trap_no is set to TRAP_RESERVED_INST rather than
	 * TRAP_ILLEGAL_SLOT_INST -- confirm this is intentional. */
	tsk->thread.trap_no = TRAP_RESERVED_INST;
	CHK_REMOTE_DEBUG(&regs);
	force_sig(SIGILL, tsk);
	die_if_no_fixup("illegal slot instruction", &regs, error_code);
}
1da177e4 LT |
751 | |
752 | asmlinkage void do_exception_error(unsigned long r4, unsigned long r5, | |
753 | unsigned long r6, unsigned long r7, | |
754 | struct pt_regs regs) | |
755 | { | |
756 | long ex; | |
0983b318 YS |
757 | |
758 | lookup_exception_vector(ex); | |
1da177e4 LT |
759 | die_if_kernel("exception", ®s, ex); |
760 | } | |
761 | ||
#if defined(CONFIG_SH_STANDARD_BIOS)
/* Saved entry point through which debug/BIOS traps are re-delegated. */
void *gdb_vbr_vector;

static inline void __init gdb_vbr_init(void)
{
	register unsigned long vbr;

	/*
	 * Read the old value of the VBR register to initialise
	 * the vector through which debug and BIOS traps are
	 * delegated by the Linux trap handler.
	 */
	asm volatile("stc vbr, %0" : "=r" (vbr));

	/* presumably VBR+0x100 is the general-exception entry offset --
	 * TODO confirm against the entry code */
	gdb_vbr_vector = (void *)(vbr + 0x100);
	printk("Setting GDB trap vector to 0x%08lx\n",
	       (unsigned long)gdb_vbr_vector);
}
#endif
781 | ||
/*
 * Point VBR at this kernel's exception vector table (vbr_base),
 * optionally saving the previous VBR for the standard BIOS/GDB hook
 * first.  Called once per CPU.
 */
void __init per_cpu_trap_init(void)
{
	extern void *vbr_base;

#ifdef CONFIG_SH_STANDARD_BIOS
	gdb_vbr_init();
#endif

	/* NOTE: The VBR value should be at P1
	   (or P2, virtural "fixed" address space).
	   It's definitely should not in physical address.  */

	asm volatile("ldc	%0, vbr"
		     : /* no output */
		     : "r" (&vbr_base)
		     : "memory");
}
799 | ||
/*
 * Install 'handler' at slot 'vec' of the exception dispatch table and
 * hand back whatever handler was there before.
 */
void *set_exception_table_vec(unsigned int vec, void *handler)
{
	extern void *exception_handling_table[];
	void *prev;

	prev = exception_handling_table[vec];
	exception_handling_table[vec] = handler;

	return prev;
}
1da177e4 | 809 | |
0983b318 YS |
/* arch/sh/kernel/cpu/sh2/ex.S (SH-2 address error entry) */
extern asmlinkage void address_error_handler(unsigned long r4, unsigned long r5,
					     unsigned long r6, unsigned long r7,
					     struct pt_regs regs);

/*
 * Install the architecture trap handlers and set up VBR for the boot
 * CPU.
 */
void __init trap_init(void)
{
	set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst);
	set_exception_table_vec(TRAP_ILLEGAL_SLOT_INST, do_illegal_slot_inst);

#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_FPU) || \
    defined(CONFIG_SH_FPU_EMU)
	/*
	 * For SH-4 lacking an FPU, treat floating point instructions as
	 * reserved. They'll be handled in the math-emu case, or faulted on
	 * otherwise.
	 */
	set_exception_table_evt(0x800, do_reserved_inst);
	set_exception_table_evt(0x820, do_illegal_slot_inst);
#elif defined(CONFIG_SH_FPU)
	set_exception_table_evt(0x800, do_fpu_state_restore);
	set_exception_table_evt(0x820, do_fpu_state_restore);
#endif

#ifdef CONFIG_CPU_SH2
	set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_handler);
#endif
#ifdef CONFIG_CPU_SH2A
	set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error);
	set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error);
#endif

	/* Setup VBR for boot cpu */
	per_cpu_trap_init();
}
844 | ||
6b002230 PM |
/*
 * Walk the kernel stack from 'sp' and print every word that looks like
 * a kernel text address.  Nothing is printed for a userspace register
 * set.
 */
void show_trace(struct task_struct *tsk, unsigned long *sp,
		struct pt_regs *regs)
{
	if (regs && user_mode(regs))
		return;

	printk("\nCall trace: ");
#ifdef CONFIG_KALLSYMS
	printk("\n");
#endif

	for (; !kstack_end(sp); sp++) {
		unsigned long word = *sp;

		if (kernel_text_address(word))
			print_ip_sym(word);
	}

	printk("\n");
}
866 | ||
6b002230 | 867 | void show_stack(struct task_struct *tsk, unsigned long *sp) |
1da177e4 | 868 | { |
6b002230 PM |
869 | unsigned long stack; |
870 | ||
871 | if (!tsk) | |
872 | tsk = current; | |
873 | if (tsk == current) | |
874 | sp = (unsigned long *)current_stack_pointer; | |
875 | else | |
876 | sp = (unsigned long *)tsk->thread.sp; | |
877 | ||
878 | stack = (unsigned long)sp; | |
879 | dump_mem("Stack: ", stack, THREAD_SIZE + | |
880 | (unsigned long)task_stack_page(tsk)); | |
881 | show_trace(tsk, sp, NULL); | |
1da177e4 LT |
882 | } |
883 | ||
/* Arch hook for the generic dump_stack(): dump the current stack. */
void dump_stack(void)
{
	show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);