// SPDX-License-Identifier: GPL-2.0
/*
 * arch/alpha/kernel/traps.c
 *
 * (C) Copyright 1994  Linus Torvalds
 */

/*
 * This file initializes the trap entry points
 */

#include <linux/jiffies.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/tty.h>
#include <linux/delay.h>
#include <linux/extable.h>
#include <linux/kallsyms.h>
#include <linux/ratelimit.h>

#include <asm/gentrap.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <asm/sysinfo.h>
#include <asm/hwrpb.h>
#include <asm/mmu_context.h>
#include <asm/special_insns.h>

#include "proto.h"

/* Work-around for some SRMs which mishandle opDEC faults.  */
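/* opDEC_fix ends up holding the PC adjustment (0 or 4) that the opDEC
   case in do_entIF() must apply on EV4 when the console delivers the
   fault PC one instruction too early; see opDEC_check() below. */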

static int opDEC_fix;

static void
opDEC_check(void)
{
	__asm__ __volatile__ (
	/* Load the address of... */
	"	br	$16, 1f\n"
	/* A stub instruction fault handler.  Just add 4 to the
	   pc and continue. */
	"	ldq	$16, 8($sp)\n"
	"	addq	$16, 4, $16\n"
	"	stq	$16, 8($sp)\n"
	"	call_pal %[rti]\n"
	/* Install the instruction fault handler. */
	"1:	lda	$17, 3\n"
	"	call_pal %[wrent]\n"
	/* With that in place, the fault from the round-to-minf fp
	   insn will arrive either at the "lda 4" insn (bad) or one
	   past that (good).  This places the correct fixup in %0. */
	"	lda %[fix], 0\n"
	"	cvttq/svm $f31,$f31\n"
	"	lda %[fix], 4"
	: [fix] "=r" (opDEC_fix)
	: [rti] "n" (PAL_rti), [wrent] "n" (PAL_wrent)
	: "$0", "$1", "$16", "$17", "$22", "$23", "$24", "$25");

	if (opDEC_fix)
		printk("opDEC fixup enabled.\n");
}

void
dik_show_regs(struct pt_regs *regs, unsigned long *r9_15)
{
	printk("pc = [<%016lx>]  ra = [<%016lx>]  ps = %04lx    %s\n",
	       regs->pc, regs->r26, regs->ps, print_tainted());
	printk("pc is at %pSR\n", (void *)regs->pc);
	printk("ra is at %pSR\n", (void *)regs->r26);
	printk("v0 = %016lx  t0 = %016lx  t1 = %016lx\n",
	       regs->r0, regs->r1, regs->r2);
	printk("t2 = %016lx  t3 = %016lx  t4 = %016lx\n",
	       regs->r3, regs->r4, regs->r5);
	printk("t5 = %016lx  t6 = %016lx  t7 = %016lx\n",
	       regs->r6, regs->r7, regs->r8);

	if (r9_15) {
		printk("s0 = %016lx  s1 = %016lx  s2 = %016lx\n",
		       r9_15[9], r9_15[10], r9_15[11]);
		printk("s3 = %016lx  s4 = %016lx  s5 = %016lx\n",
		       r9_15[12], r9_15[13], r9_15[14]);
		printk("s6 = %016lx\n", r9_15[15]);
	}

	printk("a0 = %016lx  a1 = %016lx  a2 = %016lx\n",
	       regs->r16, regs->r17, regs->r18);
	printk("a3 = %016lx  a4 = %016lx  a5 = %016lx\n",
	       regs->r19, regs->r20, regs->r21);
	printk("t8 = %016lx  t9 = %016lx  t10= %016lx\n",
	       regs->r22, regs->r23, regs->r24);
	printk("t11= %016lx  pv = %016lx  at = %016lx\n",
	       regs->r25, regs->r27, regs->r28);
	printk("gp = %016lx  sp = %p\n", regs->gp, regs+1);
#if 0
__halt();
#endif
}

#if 0
static char * ireg_name[] = {"v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
			   "t7", "s0", "s1", "s2", "s3", "s4", "s5", "s6",
			   "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
			   "t10", "t11", "ra", "pv", "at", "gp", "sp", "zero"};
#endif

static void
dik_show_code(unsigned int *pc)
{
	long i;

	printk("Code:");
	for (i = -6; i < 2; i++) {
		unsigned int insn;
		if (__get_user(insn, (unsigned int __user *)pc + i))
			break;
		printk("%c%08x%c", i ? ' ' : '<', insn, i ? ' ' : '>');
	}
	printk("\n");
}
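/* Alpha kernels are built without a reliable unwinder, so this backtrace is
   a heuristic: walk up the stack until sp reaches an 8KB boundary (the
   0x1ff8 mask) and print every word that falls inside kernel text, i.e.
   within [_stext, _etext). */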
static void
dik_show_trace(unsigned long *sp)
{
	long i = 0;
	printk("Trace:\n");
	while (0x1ff8 & (unsigned long) sp) {
		extern char _stext[], _etext[];
		unsigned long tmp = *sp;
		sp++;
		if (tmp < (unsigned long) &_stext)
			continue;
		if (tmp >= (unsigned long) &_etext)
			continue;
		printk("[<%lx>] %pSR\n", tmp, (void *)tmp);
		if (i > 40) {
			printk(" ...");
			break;
		}
	}
	printk("\n");
}

static int kstack_depth_to_print = 24;

void show_stack(struct task_struct *task, unsigned long *sp)
{
	unsigned long *stack;
	int i;

	/*
	 * debugging aid: "show_stack(NULL);" prints the
	 * back trace for this cpu.
	 */
	if (sp == NULL)
		sp = (unsigned long *)&sp;

	stack = sp;
	for (i = 0; i < kstack_depth_to_print; i++) {
		if (((long) stack & (THREAD_SIZE-1)) == 0)
			break;
		if (i && ((i % 4) == 0))
			printk("\n       ");
		printk("%016lx ", *stack++);
	}
	printk("\n");
	dik_show_trace(sp);
}

void
die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
{
	if (regs->ps & 8)
		return;
#ifdef CONFIG_SMP
	printk("CPU %d ", hard_smp_processor_id());
#endif
	printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err);
	dik_show_regs(regs, r9_15);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	dik_show_trace((unsigned long *)(regs+1));
	dik_show_code((unsigned int *)regs->pc);

	if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
		printk("die_if_kernel recursion detected.\n");
		local_irq_enable();
		while (1);
	}
	do_exit(SIGSEGV);
}

#ifndef CONFIG_MATHEMU
static long dummy_emul(void) { return 0; }
long (*alpha_fp_emul_imprecise)(struct pt_regs *regs, unsigned long writemask)
  = (void *)dummy_emul;
EXPORT_SYMBOL_GPL(alpha_fp_emul_imprecise);
long (*alpha_fp_emul) (unsigned long pc)
  = (void *)dummy_emul;
EXPORT_SYMBOL_GPL(alpha_fp_emul);
#else
long alpha_fp_emul_imprecise(struct pt_regs *regs, unsigned long writemask);
long alpha_fp_emul (unsigned long pc);
#endif
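/* Without CONFIG_MATHEMU these are exported function pointers rather than
   direct calls, presumably so that a separately built FP-emulation module
   can install its handlers at load time; until then they point at the
   dummy_emul stub. */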

asmlinkage void
do_entArith(unsigned long summary, unsigned long write_mask,
	    struct pt_regs *regs)
{
	long si_code = FPE_FLTINV;
	siginfo_t info;

	if (summary & 1) {
		/* Software-completion summary bit is set, so try to
		   emulate the instruction.  If the processor supports
		   precise exceptions, we don't have to search.  */
		if (!amask(AMASK_PRECISE_TRAP))
			si_code = alpha_fp_emul(regs->pc - 4);
		else
			si_code = alpha_fp_emul_imprecise(regs, write_mask);
		if (si_code == 0)
			return;
	}
	die_if_kernel("Arithmetic fault", regs, 0, NULL);

	info.si_signo = SIGFPE;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *) regs->pc;
	send_sig_info(SIGFPE, &info, current);
}

asmlinkage void
do_entIF(unsigned long type, struct pt_regs *regs)
{
	siginfo_t info;
	int signo, code;

	if ((regs->ps & ~IPL_MAX) == 0) {
		if (type == 1) {
			const unsigned int *data
			  = (const unsigned int *) regs->pc;
			printk("Kernel bug at %s:%d\n",
			       (const char *)(data[1] | (long)data[2] << 32),
			       data[0]);
		}
#ifdef CONFIG_ALPHA_WTINT
		if (type == 4) {
			/* If CALL_PAL WTINT is totally unsupported by the
			   PALcode, e.g. MILO, "emulate" it by overwriting
			   the insn.  */
			unsigned int *pinsn
			  = (unsigned int *) regs->pc - 1;
			if (*pinsn == PAL_wtint) {
				*pinsn = 0x47e01400; /* mov 0,$0 */
				imb();
				regs->r0 = 0;
				return;
			}
		}
#endif /* ALPHA_WTINT */
		die_if_kernel((type == 1 ? "Kernel Bug" : "Instruction fault"),
			      regs, type, NULL);
	}

	switch (type) {
	case 0: /* breakpoint */
		info.si_signo = SIGTRAP;
		info.si_errno = 0;
		info.si_code = TRAP_BRKPT;
		info.si_trapno = 0;
		info.si_addr = (void __user *) regs->pc;

		if (ptrace_cancel_bpt(current)) {
			regs->pc -= 4;	/* make pc point to former bpt */
		}

		send_sig_info(SIGTRAP, &info, current);
		return;

	case 1: /* bugcheck */
		info.si_signo = SIGTRAP;
		info.si_errno = 0;
		info.si_code = TRAP_FIXME;
		info.si_addr = (void __user *) regs->pc;
		info.si_trapno = 0;
		send_sig_info(SIGTRAP, &info, current);
		return;

	case 2: /* gentrap */
		info.si_addr = (void __user *) regs->pc;
		info.si_trapno = regs->r16;
		switch ((long) regs->r16) {
		case GEN_INTOVF:
			signo = SIGFPE;
			code = FPE_INTOVF;
			break;
		case GEN_INTDIV:
			signo = SIGFPE;
			code = FPE_INTDIV;
			break;
		case GEN_FLTOVF:
			signo = SIGFPE;
			code = FPE_FLTOVF;
			break;
		case GEN_FLTDIV:
			signo = SIGFPE;
			code = FPE_FLTDIV;
			break;
		case GEN_FLTUND:
			signo = SIGFPE;
			code = FPE_FLTUND;
			break;
		case GEN_FLTINV:
			signo = SIGFPE;
			code = FPE_FLTINV;
			break;
		case GEN_FLTINE:
			signo = SIGFPE;
			code = FPE_FLTRES;
			break;
		case GEN_ROPRAND:
			signo = SIGFPE;
			code = FPE_FIXME;
			break;

		case GEN_DECOVF:
		case GEN_DECDIV:
		case GEN_DECINV:
		case GEN_ASSERTERR:
		case GEN_NULPTRERR:
		case GEN_STKOVF:
		case GEN_STRLENERR:
		case GEN_SUBSTRERR:
		case GEN_RANGERR:
		case GEN_SUBRNG:
		case GEN_SUBRNG1:
		case GEN_SUBRNG2:
		case GEN_SUBRNG3:
		case GEN_SUBRNG4:
		case GEN_SUBRNG5:
		case GEN_SUBRNG6:
		case GEN_SUBRNG7:
		default:
			signo = SIGTRAP;
			code = TRAP_FIXME;
			break;
		}

		info.si_signo = signo;
		info.si_errno = 0;
		info.si_code = code;
		info.si_addr = (void __user *) regs->pc;
		send_sig_info(signo, &info, current);
		return;

	case 4: /* opDEC */
		if (implver() == IMPLVER_EV4) {
			long si_code;

			/* Some versions of SRM do not handle
			   the opDEC properly - they return the PC of the
			   opDEC fault, not the instruction after as the
			   Alpha architecture requires.  Here we fix it up.
			   We do this by intentionally causing an opDEC
			   fault during the boot sequence and testing if
			   we get the correct PC.  If not, we set a flag
			   to correct it every time through.  */
			regs->pc += opDEC_fix;

			/* EV4 does not implement anything except normal
			   rounding.  Everything else will come here as
			   an illegal instruction.  Emulate them.  */
			si_code = alpha_fp_emul(regs->pc - 4);
			if (si_code == 0)
				return;
			if (si_code > 0) {
				info.si_signo = SIGFPE;
				info.si_errno = 0;
				info.si_code = si_code;
				info.si_addr = (void __user *) regs->pc;
				send_sig_info(SIGFPE, &info, current);
				return;
			}
		}
		break;

	case 3: /* FEN fault */
		/* Irritating users can call PAL_clrfen to disable the
		   FPU for the process.  The kernel will then trap in
		   do_switch_stack and undo_switch_stack when we try
		   to save and restore the FP registers.

		   Given that GCC by default generates code that uses the
		   FP registers, PAL_clrfen is not useful except for DoS
		   attacks.  So turn the bleeding FPU back on and be done
		   with it.  */
		current_thread_info()->pcb.flags |= 1;
		__reload_thread(&current_thread_info()->pcb);
		return;

	case 5: /* illoc */
	default: /* unexpected instruction-fault type */
		;
	}

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLOPC;
	info.si_addr = (void __user *) regs->pc;
	send_sig_info(SIGILL, &info, current);
}

/* There is an ifdef in the PALcode in MILO that enables a
   "kernel debugging entry point" as an unprivileged call_pal.

   We don't want to have anything to do with it, but unfortunately
   several versions of MILO included in distributions have it enabled,
   and if we don't put something on the entry point we'll oops.  */

asmlinkage void
do_entDbg(struct pt_regs *regs)
{
	siginfo_t info;

	die_if_kernel("Instruction fault", regs, 0, NULL);

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_ILLOPC;
	info.si_addr = (void __user *) regs->pc;
	force_sig_info(SIGILL, &info, current);
}


/*
 * entUna has a different register layout to be reasonably simple. It
 * needs access to all the integer registers (the kernel doesn't use
 * fp-regs), and it needs to have them in order for simpler access.
 *
 * Due to the non-standard register layout (and because we don't want
 * to handle floating-point regs), user-mode unaligned accesses are
 * handled separately by do_entUnaUser below.
 *
 * Oh, btw, we don't handle the "gp" register correctly, but if we fault
 * on a gp-register unaligned load/store, something is _very_ wrong
 * in the kernel anyway..
 */
struct allregs {
	unsigned long regs[32];
	unsigned long ps, pc, gp, a0, a1, a2;
};

struct unaligned_stat {
	unsigned long count, va, pc;
} unaligned[2];


/* Macro for exception fixup code to access integer registers.  */
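/* Note on the remapping below: in struct allregs the argument registers
   a0-a2 (r16-r18) are saved not in regs[] but in the a0/a1/a2 fields that
   follow ps/pc/gp, so index (r)+19 reaches them (32 array slots plus the
   3 fields ahead of a0 give indices 35..37 for r = 16..18). */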
#define una_reg(r)  (_regs[(r) >= 16 && (r) <= 18 ? (r)+19 : (r)])

asmlinkage void
do_entUna(void * va, unsigned long opcode, unsigned long reg,
	  struct allregs *regs)
{
	long error, tmp1, tmp2, tmp3, tmp4;
	unsigned long pc = regs->pc - 4;
	unsigned long *_regs = regs->regs;
	const struct exception_table_entry *fixup;

	unaligned[0].count++;
	unaligned[0].va = (unsigned long) va;
	unaligned[0].pc = pc;

	/* We don't want to use the generic get/put unaligned macros as
	   we want to trap exceptions.  Only if we actually get an
	   exception will we decide whether we should have caught it.  */

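	/* Each case below uses the classic Alpha unaligned-access idiom: two
	   aligned ldq_u loads that together cover the datum, an extract-low
	   and extract-high pair (extwl/extwh, extll/extlh, extql/extqh)
	   shifted by the low address bits, and an OR to merge the halves;
	   the stores do the reverse with the insert and mask forms.  As used
	   here, each EXC(from, to, reg, err) line adds an exception-table
	   entry so that a fault at the numbered insn resumes at the local
	   exit label with the error output made nonzero. */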
	switch (opcode) {
	case 0x0c: /* ldwu */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,1(%3)\n"
		"	extwl %1,%3,%1\n"
		"	extwh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto got_exception;
		una_reg(reg) = tmp1|tmp2;
		return;

	case 0x28: /* ldl */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,3(%3)\n"
		"	extll %1,%3,%1\n"
		"	extlh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto got_exception;
		una_reg(reg) = (int)(tmp1|tmp2);
		return;

	case 0x29: /* ldq */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,7(%3)\n"
		"	extql %1,%3,%1\n"
		"	extqh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto got_exception;
		una_reg(reg) = tmp1|tmp2;
		return;

	/* Note that the store sequences do not indicate that they change
	   memory because it _should_ be affecting nothing in this context.
	   (Otherwise we have other, much larger, problems.)  */
	case 0x0d: /* stw */
		__asm__ __volatile__(
		"1:	ldq_u %2,1(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	inswh %6,%5,%4\n"
		"	inswl %6,%5,%3\n"
		"	mskwh %2,%5,%2\n"
		"	mskwl %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,1(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		EXC(1b,5b,%2,%0)
		EXC(2b,5b,%1,%0)
		EXC(3b,5b,$31,%0)
		EXC(4b,5b,$31,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(una_reg(reg)), "0"(0));
		if (error)
			goto got_exception;
		return;

	case 0x2c: /* stl */
		__asm__ __volatile__(
		"1:	ldq_u %2,3(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	inslh %6,%5,%4\n"
		"	insll %6,%5,%3\n"
		"	msklh %2,%5,%2\n"
		"	mskll %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,3(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		EXC(1b,5b,%2,%0)
		EXC(2b,5b,%1,%0)
		EXC(3b,5b,$31,%0)
		EXC(4b,5b,$31,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(una_reg(reg)), "0"(0));
		if (error)
			goto got_exception;
		return;

	case 0x2d: /* stq */
		__asm__ __volatile__(
		"1:	ldq_u %2,7(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	insqh %6,%5,%4\n"
		"	insql %6,%5,%3\n"
		"	mskqh %2,%5,%2\n"
		"	mskql %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,7(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		EXC(1b,5b,%2,%0)
		EXC(2b,5b,%1,%0)
		EXC(3b,5b,$31,%0)
		EXC(4b,5b,$31,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(una_reg(reg)), "0"(0));
		if (error)
			goto got_exception;
		return;
	}

	printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
		pc, va, opcode, reg);
	do_exit(SIGSEGV);

got_exception:
	/* Ok, we caught the exception, but we don't want it.  Is there
	   someone to pass it along to? */
	if ((fixup = search_exception_tables(pc)) != 0) {
		unsigned long newpc;
		newpc = fixup_exception(una_reg, fixup, pc);

		printk("Forwarding unaligned exception at %lx (%lx)\n",
		       pc, newpc);

		regs->pc = newpc;
		return;
	}

	/*
	 * Yikes!  No one to forward the exception to.
	 * Since the registers are in a weird format, dump them ourselves.
	 */

	printk("%s(%d): unhandled unaligned exception\n",
	       current->comm, task_pid_nr(current));

	printk("pc = [<%016lx>]  ra = [<%016lx>]  ps = %04lx\n",
	       pc, una_reg(26), regs->ps);
	printk("r0 = %016lx  r1 = %016lx  r2 = %016lx\n",
	       una_reg(0), una_reg(1), una_reg(2));
	printk("r3 = %016lx  r4 = %016lx  r5 = %016lx\n",
	       una_reg(3), una_reg(4), una_reg(5));
	printk("r6 = %016lx  r7 = %016lx  r8 = %016lx\n",
	       una_reg(6), una_reg(7), una_reg(8));
	printk("r9 = %016lx  r10= %016lx  r11= %016lx\n",
	       una_reg(9), una_reg(10), una_reg(11));
	printk("r12= %016lx  r13= %016lx  r14= %016lx\n",
	       una_reg(12), una_reg(13), una_reg(14));
	printk("r15= %016lx\n", una_reg(15));
	printk("r16= %016lx  r17= %016lx  r18= %016lx\n",
	       una_reg(16), una_reg(17), una_reg(18));
	printk("r19= %016lx  r20= %016lx  r21= %016lx\n",
	       una_reg(19), una_reg(20), una_reg(21));
	printk("r22= %016lx  r23= %016lx  r24= %016lx\n",
	       una_reg(22), una_reg(23), una_reg(24));
	printk("r25= %016lx  r27= %016lx  r28= %016lx\n",
	       una_reg(25), una_reg(27), una_reg(28));
	printk("gp = %016lx  sp = %p\n", regs->gp, regs+1);

	dik_show_code((unsigned int *)pc);
	dik_show_trace((unsigned long *)(regs+1));

	if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
		printk("die_if_kernel recursion detected.\n");
		local_irq_enable();
		while (1);
	}
	do_exit(SIGSEGV);
}

/*
 * Convert an s-floating point value in memory format to the
 * corresponding value in register format.  The exponent
 * needs to be remapped to preserve non-finite values
 * (infinities, not-a-numbers, denormals).
 */
static inline unsigned long
s_mem_to_reg (unsigned long s_mem)
{
	unsigned long frac    = (s_mem >>  0) & 0x7fffff;
	unsigned long sign    = (s_mem >> 31) & 0x1;
	unsigned long exp_msb = (s_mem >> 30) & 0x1;
	unsigned long exp_low = (s_mem >> 23) & 0x7f;
	unsigned long exp;

	exp = (exp_msb << 10) | exp_low;	/* common case */
	if (exp_msb) {
		if (exp_low == 0x7f) {
			exp = 0x7ff;
		}
	} else {
		if (exp_low == 0x00) {
			exp = 0x000;
		} else {
			exp |= (0x7 << 7);
		}
	}
	return (sign << 63) | (exp << 52) | (frac << 29);
}
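/* Worked example: 1.0f in memory format has exponent field 0x7f (bias 127),
   so exp_msb = 0 and exp_low = 0x7f; the remap ORs in 0x7 << 7 and yields
   0x3ff, exactly the bias-1023 exponent of 1.0 in register (T) format.
   All-ones and all-zero exponents map to their T-format counterparts, so
   infinities, NaNs, zeros and denormals survive the conversion. */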

/*
 * Convert an s-floating point value in register format to the
 * corresponding value in memory format.
 */
static inline unsigned long
s_reg_to_mem (unsigned long s_reg)
{
	return ((s_reg >> 62) << 30) | ((s_reg << 5) >> 34);
}
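/* This is the inverse packing: bits 63:62 of the register image (sign and
   exponent MSB) move to bits 31:30 of the memory word, and the shift pair
   ((s_reg << 5) >> 34) drops the replicated middle exponent bits, keeping
   the low 7 exponent bits and the top 23 fraction bits in bits 29:0. */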

/*
 * Handle user-level unaligned fault.  Handling user-level unaligned
 * faults is *extremely* slow and produces nasty messages.  A user
 * program *should* fix unaligned faults ASAP.
 *
 * Notice that we have (almost) the regular kernel stack layout here,
 * so finding the appropriate registers is a little more difficult
 * than in the kernel case.
 *
 * Finally, we handle regular integer load/stores only.  In
 * particular, load-linked/store-conditionally and floating point
 * load/stores are not supported.  The former make no sense with
 * unaligned faults (they are guaranteed to fail) and I don't think
 * the latter will occur in any decent program.
 *
 * Sigh. We *do* have to handle some FP operations, because GCC will
 * use them as temporary storage for integer memory to memory copies.
 * However, we need to deal with stt/ldt and sts/lds only.
 */

#define OP_INT_MASK	( 1L << 0x28 | 1L << 0x2c /* ldl stl */	\
			| 1L << 0x29 | 1L << 0x2d /* ldq stq */	\
			| 1L << 0x0c | 1L << 0x0d /* ldwu stw */	\
			| 1L << 0x0a | 1L << 0x0e ) /* ldbu stb */

#define OP_WRITE_MASK	( 1L << 0x26 | 1L << 0x27 /* sts stt */	\
			| 1L << 0x2c | 1L << 0x2d /* stl stq */	\
			| 1L << 0x0d | 1L << 0x0e ) /* stw stb */
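/* Alpha opcodes fit in 6 bits, so a 64-bit constant indexed by (1L << opcode)
   gives a constant-time membership test; e.g. ((1L << opcode) & OP_INT_MASK)
   is nonzero exactly for the integer load/store opcodes listed above. */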

#define R(x)	((size_t) &((struct pt_regs *)0)->x)

static int unauser_reg_offsets[32] = {
	R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8),
	/* r9 ... r15 are stored in front of regs.  */
	-56, -48, -40, -32, -24, -16, -8,
	R(r16), R(r17), R(r18),
	R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26),
	R(r27), R(r28), R(gp),
	0, 0
};

#undef R
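/* R(x) is just an offsetof-style byte offset into struct pt_regs; the
   negative entries (-56 .. -8) reach r9-r15, which the unaligned-trap entry
   code saves immediately in front of (below) the pt_regs area.  The last two
   slots (r30/usp and r31/zero) are placeholders: do_entUnaUser special-cases
   them via rdusp()/wrusp() and a zero fake register. */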

asmlinkage void
do_entUnaUser(void __user * va, unsigned long opcode,
	      unsigned long reg, struct pt_regs *regs)
{
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

	unsigned long tmp1, tmp2, tmp3, tmp4;
	unsigned long fake_reg, *reg_addr = &fake_reg;
	siginfo_t info;
	long error;

	/* Check the UAC bits to decide what the user wants us to do
	   with the unaligned access.  */

	if (!(current_thread_info()->status & TS_UAC_NOPRINT)) {
		if (__ratelimit(&ratelimit)) {
			printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n",
			       current->comm, task_pid_nr(current),
			       regs->pc - 4, va, opcode, reg);
		}
	}
	if ((current_thread_info()->status & TS_UAC_SIGBUS))
		goto give_sigbus;
	/* Not sure why you'd want to use this, but...  */
	if ((current_thread_info()->status & TS_UAC_NOFIX))
		return;

	/* Don't bother reading ds in the access check since we already
	   know that this came from the user.  Also rely on the fact that
	   the page at TASK_SIZE is unmapped and so can't be touched anyway. */
	if ((unsigned long)va >= TASK_SIZE)
		goto give_sigsegv;

	++unaligned[1].count;
	unaligned[1].va = (unsigned long)va;
	unaligned[1].pc = regs->pc - 4;

	if ((1L << opcode) & OP_INT_MASK) {
		/* it's an integer load/store */
		if (reg < 30) {
			reg_addr = (unsigned long *)
			  ((char *)regs + unauser_reg_offsets[reg]);
		} else if (reg == 30) {
			/* usp in PAL regs */
			fake_reg = rdusp();
		} else {
			/* zero "register" */
			fake_reg = 0;
		}
	}

	/* We don't want to use the generic get/put unaligned macros as
	   we want to trap exceptions.  Only if we actually get an
	   exception will we decide whether we should have caught it.  */

	switch (opcode) {
	case 0x0c: /* ldwu */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,1(%3)\n"
		"	extwl %1,%3,%1\n"
		"	extwh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto give_sigsegv;
		*reg_addr = tmp1|tmp2;
		break;

	case 0x22: /* lds */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,3(%3)\n"
		"	extll %1,%3,%1\n"
		"	extlh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto give_sigsegv;
		alpha_write_fp_reg(reg, s_mem_to_reg((int)(tmp1|tmp2)));
		return;

	case 0x23: /* ldt */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,7(%3)\n"
		"	extql %1,%3,%1\n"
		"	extqh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto give_sigsegv;
		alpha_write_fp_reg(reg, tmp1|tmp2);
		return;

	case 0x28: /* ldl */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,3(%3)\n"
		"	extll %1,%3,%1\n"
		"	extlh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto give_sigsegv;
		*reg_addr = (int)(tmp1|tmp2);
		break;

	case 0x29: /* ldq */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,7(%3)\n"
		"	extql %1,%3,%1\n"
		"	extqh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto give_sigsegv;
		*reg_addr = tmp1|tmp2;
		break;

	/* Note that the store sequences do not indicate that they change
	   memory because it _should_ be affecting nothing in this context.
	   (Otherwise we have other, much larger, problems.)  */
	case 0x0d: /* stw */
		__asm__ __volatile__(
		"1:	ldq_u %2,1(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	inswh %6,%5,%4\n"
		"	inswl %6,%5,%3\n"
		"	mskwh %2,%5,%2\n"
		"	mskwl %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,1(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		EXC(1b,5b,%2,%0)
		EXC(2b,5b,%1,%0)
		EXC(3b,5b,$31,%0)
		EXC(4b,5b,$31,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(*reg_addr), "0"(0));
		if (error)
			goto give_sigsegv;
		return;

	case 0x26: /* sts */
		fake_reg = s_reg_to_mem(alpha_read_fp_reg(reg));
		/* FALLTHRU */

	case 0x2c: /* stl */
		__asm__ __volatile__(
		"1:	ldq_u %2,3(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	inslh %6,%5,%4\n"
		"	insll %6,%5,%3\n"
		"	msklh %2,%5,%2\n"
		"	mskll %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,3(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		EXC(1b,5b,%2,%0)
		EXC(2b,5b,%1,%0)
		EXC(3b,5b,$31,%0)
		EXC(4b,5b,$31,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(*reg_addr), "0"(0));
		if (error)
			goto give_sigsegv;
		return;

	case 0x27: /* stt */
		fake_reg = alpha_read_fp_reg(reg);
		/* FALLTHRU */

	case 0x2d: /* stq */
		__asm__ __volatile__(
		"1:	ldq_u %2,7(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	insqh %6,%5,%4\n"
		"	insql %6,%5,%3\n"
		"	mskqh %2,%5,%2\n"
		"	mskql %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,7(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		EXC(1b,5b,%2,%0)
		EXC(2b,5b,%1,%0)
		EXC(3b,5b,$31,%0)
		EXC(4b,5b,$31,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(*reg_addr), "0"(0));
		if (error)
			goto give_sigsegv;
		return;

	default:
		/* What instruction were you trying to use, exactly? */
		goto give_sigbus;
	}

	/* Only integer loads should get here; everyone else returns early. */
	if (reg == 30)
		wrusp(fake_reg);
	return;

give_sigsegv:
	regs->pc -= 4;  /* make pc point to faulting insn */
	info.si_signo = SIGSEGV;
	info.si_errno = 0;

	/* We need to replicate some of the logic in mm/fault.c,
	   since we don't have access to the fault code in the
	   exception handling return path.  */
	if ((unsigned long)va >= TASK_SIZE)
		info.si_code = SEGV_ACCERR;
	else {
		struct mm_struct *mm = current->mm;
		down_read(&mm->mmap_sem);
		if (find_vma(mm, (unsigned long)va))
			info.si_code = SEGV_ACCERR;
		else
			info.si_code = SEGV_MAPERR;
		up_read(&mm->mmap_sem);
	}
	info.si_addr = va;
	send_sig_info(SIGSEGV, &info, current);
	return;

give_sigbus:
	regs->pc -= 4;
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRALN;
	info.si_addr = va;
	send_sig_info(SIGBUS, &info, current);
	return;
}
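
/* The second wrent() argument selects which PALcode trap entry vector is
   being installed; the numbering used below (1 arithmetic, 2 memory
   management, 3 instruction fault, 4 unaligned access, 5 system call,
   6 the MILO "debug" entry noted above) follows the OSF/1-style PALcode
   conventions that Linux/Alpha runs on. */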
void
trap_init(void)
{
	/* Tell PAL-code what global pointer we want in the kernel.  */
	register unsigned long gptr __asm__("$29");
	wrkgp(gptr);

	/* Hack for Multia (UDB) and JENSEN: some of their SRMs have
	   a bug in the handling of the opDEC fault.  Fix it up if so.  */
	if (implver() == IMPLVER_EV4)
		opDEC_check();

	wrent(entArith, 1);
	wrent(entMM, 2);
	wrent(entIF, 3);
	wrent(entUna, 4);
	wrent(entSys, 5);
	wrent(entDbg, 6);
}