sh: migrate to arch/sh/include/
[linux-2.6-block.git] / arch / sh / kernel / cpu / sh5 / entry.S
CommitLineData
1da177e4 1/*
a23ba435 2 * arch/sh/kernel/cpu/sh5/entry.S
1da177e4
LT
3 *
4 * Copyright (C) 2000, 2001 Paolo Alberelli
a23ba435
PM
5 * Copyright (C) 2004 - 2007 Paul Mundt
6 * Copyright (C) 2003, 2004 Richard Curnow
1da177e4 7 *
a23ba435
PM
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
1da177e4 11 */
1da177e4
LT
12#include <linux/errno.h>
13#include <linux/sys.h>
f15cbe6f 14#include <cpu/registers.h>
1da177e4 15#include <asm/processor.h>
1da177e4
LT
16#include <asm/unistd.h>
17#include <asm/thread_info.h>
18#include <asm/asm-offsets.h>
19
20/*
21 * SR fields.
22 */
23#define SR_ASID_MASK 0x00ff0000
24#define SR_FD_MASK 0x00008000
25#define SR_SS 0x08000000
26#define SR_BL 0x10000000
27#define SR_MD 0x40000000
28
29/*
30 * Event code.
31 */
32#define EVENT_INTERRUPT 0
33#define EVENT_FAULT_TLB 1
34#define EVENT_FAULT_NOT_TLB 2
35#define EVENT_DEBUG 3
36
37/* EXPEVT values */
38#define RESET_CAUSE 0x20
39#define DEBUGSS_CAUSE 0x980
40
41/*
42 * Frame layout. Quad index.
43 */
44#define FRAME_T(x) FRAME_TBASE+(x*8)
45#define FRAME_R(x) FRAME_RBASE+(x*8)
46#define FRAME_S(x) FRAME_SBASE+(x*8)
47#define FSPC 0
48#define FSSR 1
49#define FSYSCALL_ID 2
50
51/* Arrange the save frame to be a multiple of 32 bytes long */
52#define FRAME_SBASE 0
53#define FRAME_RBASE (FRAME_SBASE+(3*8)) /* SYSCALL_ID - SSR - SPC */
54#define FRAME_TBASE (FRAME_RBASE+(63*8)) /* r0 - r62 */
55#define FRAME_PBASE (FRAME_TBASE+(8*8)) /* tr0 -tr7 */
56#define FRAME_SIZE (FRAME_PBASE+(2*8)) /* pad0-pad1 */
57
58#define FP_FRAME_SIZE FP_FRAME_BASE+(33*8) /* dr0 - dr31 + fpscr */
59#define FP_FRAME_BASE 0
60
61#define SAVED_R2 0*8
62#define SAVED_R3 1*8
63#define SAVED_R4 2*8
64#define SAVED_R5 3*8
65#define SAVED_R18 4*8
66#define SAVED_R6 5*8
67#define SAVED_TR0 6*8
68
69/* These are the registers saved in the TLB path that aren't saved in the first
70 level of the normal one. */
71#define TLB_SAVED_R25 7*8
72#define TLB_SAVED_TR1 8*8
73#define TLB_SAVED_TR2 9*8
74#define TLB_SAVED_TR3 10*8
75#define TLB_SAVED_TR4 11*8
76/* Save R0/R1 : PT-migrating compiler currently dishounours -ffixed-r0 and -ffixed-r1 causing
77 breakage otherwise. */
78#define TLB_SAVED_R0 12*8
79#define TLB_SAVED_R1 13*8
80
81#define CLI() \
82 getcon SR, r6; \
83 ori r6, 0xf0, r6; \
84 putcon r6, SR;
85
86#define STI() \
87 getcon SR, r6; \
88 andi r6, ~0xf0, r6; \
89 putcon r6, SR;
90
91#ifdef CONFIG_PREEMPT
92# define preempt_stop() CLI()
93#else
94# define preempt_stop()
95# define resume_kernel restore_all
96#endif
97
98 .section .data, "aw"
99
100#define FAST_TLBMISS_STACK_CACHELINES 4
101#define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES)
102
103/* Register back-up area for all exceptions */
104 .balign 32
105 /* Allow for 16 quadwords to be pushed by fast tlbmiss handling
106 * register saves etc. */
107 .fill FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0
108/* This is 32 byte aligned by construction */
109/* Register back-up area for all exceptions */
110reg_save_area:
111 .quad 0
112 .quad 0
113 .quad 0
114 .quad 0
115
116 .quad 0
117 .quad 0
118 .quad 0
119 .quad 0
120
121 .quad 0
122 .quad 0
123 .quad 0
124 .quad 0
125
126 .quad 0
127 .quad 0
128
129/* Save area for RESVEC exceptions. We cannot use reg_save_area because of
130 * reentrancy. Note this area may be accessed via physical address.
131 * Align so this fits a whole single cache line, for ease of purging.
132 */
133 .balign 32,0,32
134resvec_save_area:
135 .quad 0
136 .quad 0
137 .quad 0
138 .quad 0
139 .quad 0
140 .balign 32,0,32
141
142/* Jump table of 3rd level handlers */
143trap_jtable:
144 .long do_exception_error /* 0x000 */
145 .long do_exception_error /* 0x020 */
ccd80587 146#ifdef CONFIG_MMU
1da177e4
LT
147 .long tlb_miss_load /* 0x040 */
148 .long tlb_miss_store /* 0x060 */
ccd80587
PM
149#else
150 .long do_exception_error
151 .long do_exception_error
152#endif
1da177e4
LT
153 ! ARTIFICIAL pseudo-EXPEVT setting
154 .long do_debug_interrupt /* 0x080 */
ccd80587 155#ifdef CONFIG_MMU
1da177e4
LT
156 .long tlb_miss_load /* 0x0A0 */
157 .long tlb_miss_store /* 0x0C0 */
ccd80587
PM
158#else
159 .long do_exception_error
160 .long do_exception_error
161#endif
1da177e4
LT
162 .long do_address_error_load /* 0x0E0 */
163 .long do_address_error_store /* 0x100 */
164#ifdef CONFIG_SH_FPU
165 .long do_fpu_error /* 0x120 */
166#else
167 .long do_exception_error /* 0x120 */
168#endif
169 .long do_exception_error /* 0x140 */
170 .long system_call /* 0x160 */
171 .long do_reserved_inst /* 0x180 */
172 .long do_illegal_slot_inst /* 0x1A0 */
92b59258 173 .long do_exception_error /* 0x1C0 - NMI */
1da177e4
LT
174 .long do_exception_error /* 0x1E0 */
175 .rept 15
176 .long do_IRQ /* 0x200 - 0x3C0 */
177 .endr
178 .long do_exception_error /* 0x3E0 */
179 .rept 32
180 .long do_IRQ /* 0x400 - 0x7E0 */
181 .endr
182 .long fpu_error_or_IRQA /* 0x800 */
183 .long fpu_error_or_IRQB /* 0x820 */
184 .long do_IRQ /* 0x840 */
185 .long do_IRQ /* 0x860 */
186 .rept 6
187 .long do_exception_error /* 0x880 - 0x920 */
188 .endr
189 .long do_software_break_point /* 0x940 */
190 .long do_exception_error /* 0x960 */
191 .long do_single_step /* 0x980 */
192
193 .rept 3
194 .long do_exception_error /* 0x9A0 - 0x9E0 */
195 .endr
196 .long do_IRQ /* 0xA00 */
197 .long do_IRQ /* 0xA20 */
ccd80587 198#ifdef CONFIG_MMU
1da177e4 199 .long itlb_miss_or_IRQ /* 0xA40 */
ccd80587
PM
200#else
201 .long do_IRQ
202#endif
1da177e4
LT
203 .long do_IRQ /* 0xA60 */
204 .long do_IRQ /* 0xA80 */
ccd80587 205#ifdef CONFIG_MMU
1da177e4 206 .long itlb_miss_or_IRQ /* 0xAA0 */
ccd80587
PM
207#else
208 .long do_IRQ
209#endif
1da177e4
LT
210 .long do_exception_error /* 0xAC0 */
211 .long do_address_error_exec /* 0xAE0 */
212 .rept 8
213 .long do_exception_error /* 0xB00 - 0xBE0 */
214 .endr
215 .rept 18
216 .long do_IRQ /* 0xC00 - 0xE20 */
217 .endr
218
219 .section .text64, "ax"
220
221/*
222 * --- Exception/Interrupt/Event Handling Section
223 */
224
225/*
226 * VBR and RESVEC blocks.
227 *
228 * First level handler for VBR-based exceptions.
229 *
230 * To avoid waste of space, align to the maximum text block size.
231 * This is assumed to be at most 128 bytes or 32 instructions.
232 * DO NOT EXCEED 32 instructions on the first level handlers !
233 *
234 * Also note that RESVEC is contained within the VBR block
235 * where the room left (1KB - TEXT_SIZE) allows placing
236 * the RESVEC block (at most 512B + TEXT_SIZE).
237 *
238 * So first (and only) level handler for RESVEC-based exceptions.
239 *
240 * Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss
241 * and interrupt) we are a lot tight with register space until
242 * saving onto the stack frame, which is done in handle_exception().
243 *
244 */
245
246#define TEXT_SIZE 128
247#define BLOCK_SIZE 1664 /* Dynamic check, 13*128 */
248
249 .balign TEXT_SIZE
250LVBR_block:
251 .space 256, 0 /* Power-on class handler, */
252 /* not required here */
253not_a_tlb_miss:
254 synco /* TAKum03020 (but probably a good idea anyway.) */
255 /* Save original stack pointer into KCR1 */
256 putcon SP, KCR1
257
258 /* Save other original registers into reg_save_area */
259 movi reg_save_area, SP
260 st.q SP, SAVED_R2, r2
261 st.q SP, SAVED_R3, r3
262 st.q SP, SAVED_R4, r4
263 st.q SP, SAVED_R5, r5
264 st.q SP, SAVED_R6, r6
265 st.q SP, SAVED_R18, r18
266 gettr tr0, r3
267 st.q SP, SAVED_TR0, r3
268
269 /* Set args for Non-debug, Not a TLB miss class handler */
270 getcon EXPEVT, r2
271 movi ret_from_exception, r3
272 ori r3, 1, r3
273 movi EVENT_FAULT_NOT_TLB, r4
274 or SP, ZERO, r5
275 getcon KCR1, SP
276 pta handle_exception, tr0
277 blink tr0, ZERO
278
279 .balign 256
280 ! VBR+0x200
281 nop
282 .balign 256
283 ! VBR+0x300
284 nop
285 .balign 256
286 /*
287 * Instead of the natural .balign 1024 place RESVEC here
288 * respecting the final 1KB alignment.
289 */
290 .balign TEXT_SIZE
291 /*
292 * Instead of '.space 1024-TEXT_SIZE' place the RESVEC
293 * block making sure the final alignment is correct.
294 */
ccd80587 295#ifdef CONFIG_MMU
1da177e4
LT
296tlb_miss:
297 synco /* TAKum03020 (but probably a good idea anyway.) */
298 putcon SP, KCR1
299 movi reg_save_area, SP
300 /* SP is guaranteed 32-byte aligned. */
301 st.q SP, TLB_SAVED_R0 , r0
302 st.q SP, TLB_SAVED_R1 , r1
303 st.q SP, SAVED_R2 , r2
304 st.q SP, SAVED_R3 , r3
305 st.q SP, SAVED_R4 , r4
306 st.q SP, SAVED_R5 , r5
307 st.q SP, SAVED_R6 , r6
308 st.q SP, SAVED_R18, r18
309
310 /* Save R25 for safety; as/ld may want to use it to achieve the call to
311 * the code in mm/tlbmiss.c */
312 st.q SP, TLB_SAVED_R25, r25
313 gettr tr0, r2
314 gettr tr1, r3
315 gettr tr2, r4
316 gettr tr3, r5
317 gettr tr4, r18
318 st.q SP, SAVED_TR0 , r2
319 st.q SP, TLB_SAVED_TR1 , r3
320 st.q SP, TLB_SAVED_TR2 , r4
321 st.q SP, TLB_SAVED_TR3 , r5
322 st.q SP, TLB_SAVED_TR4 , r18
323
324 pt do_fast_page_fault, tr0
325 getcon SSR, r2
326 getcon EXPEVT, r3
327 getcon TEA, r4
328 shlri r2, 30, r2
329 andi r2, 1, r2 /* r2 = SSR.MD */
330 blink tr0, LINK
331
332 pt fixup_to_invoke_general_handler, tr1
333
334 /* If the fast path handler fixed the fault, just drop through quickly
335 to the restore code right away to return to the excepting context.
336 */
337 beqi/u r2, 0, tr1
338
339fast_tlb_miss_restore:
340 ld.q SP, SAVED_TR0, r2
341 ld.q SP, TLB_SAVED_TR1, r3
342 ld.q SP, TLB_SAVED_TR2, r4
343
344 ld.q SP, TLB_SAVED_TR3, r5
345 ld.q SP, TLB_SAVED_TR4, r18
346
347 ptabs r2, tr0
348 ptabs r3, tr1
349 ptabs r4, tr2
350 ptabs r5, tr3
351 ptabs r18, tr4
352
353 ld.q SP, TLB_SAVED_R0, r0
354 ld.q SP, TLB_SAVED_R1, r1
355 ld.q SP, SAVED_R2, r2
356 ld.q SP, SAVED_R3, r3
357 ld.q SP, SAVED_R4, r4
358 ld.q SP, SAVED_R5, r5
359 ld.q SP, SAVED_R6, r6
360 ld.q SP, SAVED_R18, r18
361 ld.q SP, TLB_SAVED_R25, r25
362
363 getcon KCR1, SP
364 rte
365 nop /* for safety, in case the code is run on sh5-101 cut1.x */
366
367fixup_to_invoke_general_handler:
368
369 /* OK, new method. Restore stuff that's not expected to get saved into
370 the 'first-level' reg save area, then just fall through to setting
371 up the registers and calling the second-level handler. */
372
373 /* 2nd level expects r2,3,4,5,6,18,tr0 to be saved. So we must restore
374 r25,tr1-4 and save r6 to get into the right state. */
375
376 ld.q SP, TLB_SAVED_TR1, r3
377 ld.q SP, TLB_SAVED_TR2, r4
378 ld.q SP, TLB_SAVED_TR3, r5
379 ld.q SP, TLB_SAVED_TR4, r18
380 ld.q SP, TLB_SAVED_R25, r25
381
382 ld.q SP, TLB_SAVED_R0, r0
383 ld.q SP, TLB_SAVED_R1, r1
384
385 ptabs/u r3, tr1
386 ptabs/u r4, tr2
387 ptabs/u r5, tr3
388 ptabs/u r18, tr4
389
390 /* Set args for Non-debug, TLB miss class handler */
391 getcon EXPEVT, r2
392 movi ret_from_exception, r3
393 ori r3, 1, r3
394 movi EVENT_FAULT_TLB, r4
395 or SP, ZERO, r5
396 getcon KCR1, SP
397 pta handle_exception, tr0
398 blink tr0, ZERO
ccd80587
PM
399#else /* CONFIG_MMU */
400 .balign 256
401#endif
1da177e4
LT
402
403/* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
404 DOES END UP AT VBR+0x600 */
405 nop
406 nop
407 nop
408 nop
409 nop
410 nop
411
412 .balign 256
413 /* VBR + 0x600 */
414
415interrupt:
416 synco /* TAKum03020 (but probably a good idea anyway.) */
417 /* Save original stack pointer into KCR1 */
418 putcon SP, KCR1
419
420 /* Save other original registers into reg_save_area */
421 movi reg_save_area, SP
422 st.q SP, SAVED_R2, r2
423 st.q SP, SAVED_R3, r3
424 st.q SP, SAVED_R4, r4
425 st.q SP, SAVED_R5, r5
426 st.q SP, SAVED_R6, r6
427 st.q SP, SAVED_R18, r18
428 gettr tr0, r3
429 st.q SP, SAVED_TR0, r3
430
431 /* Set args for interrupt class handler */
432 getcon INTEVT, r2
433 movi ret_from_irq, r3
434 ori r3, 1, r3
435 movi EVENT_INTERRUPT, r4
436 or SP, ZERO, r5
437 getcon KCR1, SP
438 pta handle_exception, tr0
439 blink tr0, ZERO
440 .balign TEXT_SIZE /* let's waste the bare minimum */
441
442LVBR_block_end: /* Marker. Used for total checking */
443
444 .balign 256
445LRESVEC_block:
446 /* Panic handler. Called with MMU off. Possible causes/actions:
447 * - Reset: Jump to program start.
448 * - Single Step: Turn off Single Step & return.
449 * - Others: Call panic handler, passing PC as arg.
450 * (this may need to be extended...)
451 */
452reset_or_panic:
453 synco /* TAKum03020 (but probably a good idea anyway.) */
454 putcon SP, DCR
455 /* First save r0-1 and tr0, as we need to use these */
36763b22 456 movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
1da177e4
LT
457 st.q SP, 0, r0
458 st.q SP, 8, r1
459 gettr tr0, r0
460 st.q SP, 32, r0
461
462 /* Check cause */
463 getcon EXPEVT, r0
464 movi RESET_CAUSE, r1
465 sub r1, r0, r1 /* r1=0 if reset */
36763b22 466 movi _stext-CONFIG_PAGE_OFFSET, r0
1da177e4
LT
467 ori r0, 1, r0
468 ptabs r0, tr0
469 beqi r1, 0, tr0 /* Jump to start address if reset */
470
471 getcon EXPEVT, r0
472 movi DEBUGSS_CAUSE, r1
473 sub r1, r0, r1 /* r1=0 if single step */
474 pta single_step_panic, tr0
475 beqi r1, 0, tr0 /* jump if single step */
476
477 /* Now jump to where we save the registers. */
36763b22 478 movi panic_stash_regs-CONFIG_PAGE_OFFSET, r1
1da177e4
LT
479 ptabs r1, tr0
480 blink tr0, r63
481
482single_step_panic:
483 /* We are in a handler with Single Step set. We need to resume the
484 * handler, by turning on MMU & turning off Single Step. */
485 getcon SSR, r0
486 movi SR_MMU, r1
487 or r0, r1, r0
488 movi ~SR_SS, r1
489 and r0, r1, r0
490 putcon r0, SSR
491 /* Restore EXPEVT, as the rte won't do this */
492 getcon PEXPEVT, r0
493 putcon r0, EXPEVT
494 /* Restore regs */
495 ld.q SP, 32, r0
496 ptabs r0, tr0
497 ld.q SP, 0, r0
498 ld.q SP, 8, r1
499 getcon DCR, SP
500 synco
501 rte
502
503
504 .balign 256
505debug_exception:
506 synco /* TAKum03020 (but probably a good idea anyway.) */
507 /*
508 * Single step/software_break_point first level handler.
509 * Called with MMU off, so the first thing we do is enable it
510 * by doing an rte with appropriate SSR.
511 */
512 putcon SP, DCR
513 /* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */
36763b22 514 movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
1da177e4
LT
515
516 /* With the MMU off, we are bypassing the cache, so purge any
517 * data that will be made stale by the following stores.
518 */
519 ocbp SP, 0
520 synco
521
522 st.q SP, 0, r0
523 st.q SP, 8, r1
524 getcon SPC, r0
525 st.q SP, 16, r0
526 getcon SSR, r0
527 st.q SP, 24, r0
528
529 /* Enable MMU, block exceptions, set priv mode, disable single step */
530 movi SR_MMU | SR_BL | SR_MD, r1
531 or r0, r1, r0
532 movi ~SR_SS, r1
533 and r0, r1, r0
534 putcon r0, SSR
535 /* Force control to debug_exception_2 when rte is executed */
536 movi debug_exeception_2, r0
537 ori r0, 1, r0 /* force SHmedia, just in case */
538 putcon r0, SPC
539 getcon DCR, SP
540 synco
541 rte
542debug_exeception_2:
543 /* Restore saved regs */
544 putcon SP, KCR1
545 movi resvec_save_area, SP
546 ld.q SP, 24, r0
547 putcon r0, SSR
548 ld.q SP, 16, r0
549 putcon r0, SPC
550 ld.q SP, 0, r0
551 ld.q SP, 8, r1
552
553 /* Save other original registers into reg_save_area */
554 movi reg_save_area, SP
555 st.q SP, SAVED_R2, r2
556 st.q SP, SAVED_R3, r3
557 st.q SP, SAVED_R4, r4
558 st.q SP, SAVED_R5, r5
559 st.q SP, SAVED_R6, r6
560 st.q SP, SAVED_R18, r18
561 gettr tr0, r3
562 st.q SP, SAVED_TR0, r3
563
564 /* Set args for debug class handler */
565 getcon EXPEVT, r2
566 movi ret_from_exception, r3
567 ori r3, 1, r3
568 movi EVENT_DEBUG, r4
569 or SP, ZERO, r5
570 getcon KCR1, SP
571 pta handle_exception, tr0
572 blink tr0, ZERO
573
574 .balign 256
575debug_interrupt:
576 /* !!! WE COME HERE IN REAL MODE !!! */
577 /* Hook-up debug interrupt to allow various debugging options to be
578 * hooked into its handler. */
579 /* Save original stack pointer into KCR1 */
580 synco
581 putcon SP, KCR1
36763b22 582 movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
1da177e4
LT
583 ocbp SP, 0
584 ocbp SP, 32
585 synco
586
587 /* Save other original registers into reg_save_area thru real addresses */
588 st.q SP, SAVED_R2, r2
589 st.q SP, SAVED_R3, r3
590 st.q SP, SAVED_R4, r4
591 st.q SP, SAVED_R5, r5
592 st.q SP, SAVED_R6, r6
593 st.q SP, SAVED_R18, r18
594 gettr tr0, r3
595 st.q SP, SAVED_TR0, r3
596
597 /* move (spc,ssr)->(pspc,pssr). The rte will shift
598 them back again, so that they look like the originals
599 as far as the real handler code is concerned. */
600 getcon spc, r6
601 putcon r6, pspc
602 getcon ssr, r6
603 putcon r6, pssr
604
605 ! construct useful SR for handle_exception
606 movi 3, r6
607 shlli r6, 30, r6
608 getcon sr, r18
609 or r18, r6, r6
610 putcon r6, ssr
611
612 ! SSR is now the current SR with the MD and MMU bits set
613 ! i.e. the rte will switch back to priv mode and put
614 ! the mmu back on
615
616 ! construct spc
617 movi handle_exception, r18
618 ori r18, 1, r18 ! for safety (do we need this?)
619 putcon r18, spc
620
621 /* Set args for Non-debug, Not a TLB miss class handler */
622
623 ! EXPEVT==0x80 is unused, so 'steal' this value to put the
624 ! debug interrupt handler in the vectoring table
625 movi 0x80, r2
626 movi ret_from_exception, r3
627 ori r3, 1, r3
628 movi EVENT_FAULT_NOT_TLB, r4
629
630 or SP, ZERO, r5
36763b22 631 movi CONFIG_PAGE_OFFSET, r6
1da177e4
LT
632 add r6, r5, r5
633 getcon KCR1, SP
634
635 synco ! for safety
636 rte ! -> handle_exception, switch back to priv mode again
637
638LRESVEC_block_end: /* Marker. Unused. */
639
640 .balign TEXT_SIZE
641
642/*
643 * Second level handler for VBR-based exceptions. Pre-handler.
644 * In common to all stack-frame sensitive handlers.
645 *
646 * Inputs:
647 * (KCR0) Current [current task union]
648 * (KCR1) Original SP
649 * (r2) INTEVT/EXPEVT
650 * (r3) appropriate return address
651 * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3=debug)
652 * (r5) Pointer to reg_save_area
653 * (SP) Original SP
654 *
655 * Available registers:
656 * (r6)
657 * (r18)
658 * (tr0)
659 *
660 */
661handle_exception:
662 /* Common 2nd level handler. */
663
664 /* First thing we need an appropriate stack pointer */
665 getcon SSR, r6
666 shlri r6, 30, r6
667 andi r6, 1, r6
668 pta stack_ok, tr0
669 bne r6, ZERO, tr0 /* Original stack pointer is fine */
670
671 /* Set stack pointer for user fault */
672 getcon KCR0, SP
673 movi THREAD_SIZE, r6 /* Point to the end */
674 add SP, r6, SP
675
676stack_ok:
677
678/* DEBUG : check for underflow/overflow of the kernel stack */
679 pta no_underflow, tr0
680 getcon KCR0, r6
681 movi 1024, r18
682 add r6, r18, r6
683 bge SP, r6, tr0 ! ? below 1k from bottom of stack : danger zone
684
685/* Just panic to cause a crash. */
686bad_sp:
687 ld.b r63, 0, r6
688 nop
689
690no_underflow:
691 pta bad_sp, tr0
692 getcon kcr0, r6
693 movi THREAD_SIZE, r18
694 add r18, r6, r6
695 bgt SP, r6, tr0 ! sp above the stack
696
697 /* Make some room for the BASIC frame. */
698 movi -(FRAME_SIZE), r6
699 add SP, r6, SP
700
701/* Could do this with no stalling if we had another spare register, but the
702 code below will be OK. */
703 ld.q r5, SAVED_R2, r6
704 ld.q r5, SAVED_R3, r18
705 st.q SP, FRAME_R(2), r6
706 ld.q r5, SAVED_R4, r6
707 st.q SP, FRAME_R(3), r18
708 ld.q r5, SAVED_R5, r18
709 st.q SP, FRAME_R(4), r6
710 ld.q r5, SAVED_R6, r6
711 st.q SP, FRAME_R(5), r18
712 ld.q r5, SAVED_R18, r18
713 st.q SP, FRAME_R(6), r6
714 ld.q r5, SAVED_TR0, r6
715 st.q SP, FRAME_R(18), r18
716 st.q SP, FRAME_T(0), r6
717
718 /* Keep old SP around */
719 getcon KCR1, r6
720
721 /* Save the rest of the general purpose registers */
722 st.q SP, FRAME_R(0), r0
723 st.q SP, FRAME_R(1), r1
724 st.q SP, FRAME_R(7), r7
725 st.q SP, FRAME_R(8), r8
726 st.q SP, FRAME_R(9), r9
727 st.q SP, FRAME_R(10), r10
728 st.q SP, FRAME_R(11), r11
729 st.q SP, FRAME_R(12), r12
730 st.q SP, FRAME_R(13), r13
731 st.q SP, FRAME_R(14), r14
732
733 /* SP is somewhere else */
734 st.q SP, FRAME_R(15), r6
735
736 st.q SP, FRAME_R(16), r16
737 st.q SP, FRAME_R(17), r17
738 /* r18 is saved earlier. */
739 st.q SP, FRAME_R(19), r19
740 st.q SP, FRAME_R(20), r20
741 st.q SP, FRAME_R(21), r21
742 st.q SP, FRAME_R(22), r22
743 st.q SP, FRAME_R(23), r23
744 st.q SP, FRAME_R(24), r24
745 st.q SP, FRAME_R(25), r25
746 st.q SP, FRAME_R(26), r26
747 st.q SP, FRAME_R(27), r27
748 st.q SP, FRAME_R(28), r28
749 st.q SP, FRAME_R(29), r29
750 st.q SP, FRAME_R(30), r30
751 st.q SP, FRAME_R(31), r31
752 st.q SP, FRAME_R(32), r32
753 st.q SP, FRAME_R(33), r33
754 st.q SP, FRAME_R(34), r34
755 st.q SP, FRAME_R(35), r35
756 st.q SP, FRAME_R(36), r36
757 st.q SP, FRAME_R(37), r37
758 st.q SP, FRAME_R(38), r38
759 st.q SP, FRAME_R(39), r39
760 st.q SP, FRAME_R(40), r40
761 st.q SP, FRAME_R(41), r41
762 st.q SP, FRAME_R(42), r42
763 st.q SP, FRAME_R(43), r43
764 st.q SP, FRAME_R(44), r44
765 st.q SP, FRAME_R(45), r45
766 st.q SP, FRAME_R(46), r46
767 st.q SP, FRAME_R(47), r47
768 st.q SP, FRAME_R(48), r48
769 st.q SP, FRAME_R(49), r49
770 st.q SP, FRAME_R(50), r50
771 st.q SP, FRAME_R(51), r51
772 st.q SP, FRAME_R(52), r52
773 st.q SP, FRAME_R(53), r53
774 st.q SP, FRAME_R(54), r54
775 st.q SP, FRAME_R(55), r55
776 st.q SP, FRAME_R(56), r56
777 st.q SP, FRAME_R(57), r57
778 st.q SP, FRAME_R(58), r58
779 st.q SP, FRAME_R(59), r59
780 st.q SP, FRAME_R(60), r60
781 st.q SP, FRAME_R(61), r61
782 st.q SP, FRAME_R(62), r62
783
784 /*
785 * Save the S* registers.
786 */
787 getcon SSR, r61
788 st.q SP, FRAME_S(FSSR), r61
789 getcon SPC, r62
790 st.q SP, FRAME_S(FSPC), r62
791 movi -1, r62 /* Reset syscall_nr */
792 st.q SP, FRAME_S(FSYSCALL_ID), r62
793
794 /* Save the rest of the target registers */
795 gettr tr1, r6
796 st.q SP, FRAME_T(1), r6
797 gettr tr2, r6
798 st.q SP, FRAME_T(2), r6
799 gettr tr3, r6
800 st.q SP, FRAME_T(3), r6
801 gettr tr4, r6
802 st.q SP, FRAME_T(4), r6
803 gettr tr5, r6
804 st.q SP, FRAME_T(5), r6
805 gettr tr6, r6
806 st.q SP, FRAME_T(6), r6
807 gettr tr7, r6
808 st.q SP, FRAME_T(7), r6
809
810 ! setup FP so that unwinder can wind back through nested kernel mode
811 ! exceptions
812 add SP, ZERO, r14
813
814#ifdef CONFIG_POOR_MANS_STRACE
815 /* We've pushed all the registers now, so only r2-r4 hold anything
816 * useful. Move them into callee save registers */
817 or r2, ZERO, r28
818 or r3, ZERO, r29
819 or r4, ZERO, r30
820
821 /* Preserve r2 as the event code */
822 movi evt_debug, r3
823 ori r3, 1, r3
824 ptabs r3, tr0
825
826 or SP, ZERO, r6
827 getcon TRA, r5
828 blink tr0, LINK
829
830 or r28, ZERO, r2
831 or r29, ZERO, r3
832 or r30, ZERO, r4
833#endif
834
835 /* For syscall and debug race condition, get TRA now */
836 getcon TRA, r5
837
838 /* We are in a safe position to turn SR.BL off, but set IMASK=0xf
839 * Also set FD, to catch FPU usage in the kernel.
840 *
841 * benedict.gaster@superh.com 29/07/2002
842 *
843 * On all SH5-101 revisions it is unsafe to raise the IMASK and at the
844 * same time change BL from 1->0, as any pending interrupt of a level
845 * higher than he previous value of IMASK will leak through and be
846 * taken unexpectedly.
847 *
848 * To avoid this we raise the IMASK and then issue another PUTCON to
849 * enable interrupts.
850 */
851 getcon SR, r6
852 movi SR_IMASK | SR_FD, r7
853 or r6, r7, r6
854 putcon r6, SR
855 movi SR_UNBLOCK_EXC, r7
856 and r6, r7, r6
857 putcon r6, SR
858
859
860 /* Now call the appropriate 3rd level handler */
861 or r3, ZERO, LINK
862 movi trap_jtable, r3
863 shlri r2, 3, r2
864 ldx.l r2, r3, r3
865 shlri r2, 2, r2
866 ptabs r3, tr0
867 or SP, ZERO, r3
868 blink tr0, ZERO
869
870/*
871 * Second level handler for VBR-based exceptions. Post-handlers.
872 *
873 * Post-handlers for interrupts (ret_from_irq), exceptions
874 * (ret_from_exception) and common reentrance doors (restore_all
875 * to get back to the original context, ret_from_syscall loop to
876 * check kernel exiting).
877 *
878 * ret_with_reschedule and work_notifysig are an inner lables of
879 * the ret_from_syscall loop.
880 *
881 * In common to all stack-frame sensitive handlers.
882 *
883 * Inputs:
884 * (SP) struct pt_regs *, original register's frame pointer (basic)
885 *
886 */
887 .global ret_from_irq
888ret_from_irq:
889#ifdef CONFIG_POOR_MANS_STRACE
890 pta evt_debug_ret_from_irq, tr0
891 ori SP, 0, r2
892 blink tr0, LINK
893#endif
894 ld.q SP, FRAME_S(FSSR), r6
895 shlri r6, 30, r6
896 andi r6, 1, r6
897 pta resume_kernel, tr0
898 bne r6, ZERO, tr0 /* no further checks */
899 STI()
900 pta ret_with_reschedule, tr0
901 blink tr0, ZERO /* Do not check softirqs */
902
903 .global ret_from_exception
904ret_from_exception:
905 preempt_stop()
906
907#ifdef CONFIG_POOR_MANS_STRACE
908 pta evt_debug_ret_from_exc, tr0
909 ori SP, 0, r2
910 blink tr0, LINK
911#endif
912
913 ld.q SP, FRAME_S(FSSR), r6
914 shlri r6, 30, r6
915 andi r6, 1, r6
916 pta resume_kernel, tr0
917 bne r6, ZERO, tr0 /* no further checks */
918
919 /* Check softirqs */
920
921#ifdef CONFIG_PREEMPT
922 pta ret_from_syscall, tr0
923 blink tr0, ZERO
924
925resume_kernel:
926 pta restore_all, tr0
927
928 getcon KCR0, r6
929 ld.l r6, TI_PRE_COUNT, r7
930 beq/u r7, ZERO, tr0
931
932need_resched:
933 ld.l r6, TI_FLAGS, r7
934 movi (1 << TIF_NEED_RESCHED), r8
935 and r8, r7, r8
936 bne r8, ZERO, tr0
937
938 getcon SR, r7
939 andi r7, 0xf0, r7
940 bne r7, ZERO, tr0
941
942 movi ((PREEMPT_ACTIVE >> 16) & 65535), r8
943 shori (PREEMPT_ACTIVE & 65535), r8
944 st.l r6, TI_PRE_COUNT, r8
945
946 STI()
947 movi schedule, r7
948 ori r7, 1, r7
949 ptabs r7, tr1
950 blink tr1, LINK
951
952 st.l r6, TI_PRE_COUNT, ZERO
953 CLI()
954
955 pta need_resched, tr1
956 blink tr1, ZERO
957#endif
958
959 .global ret_from_syscall
960ret_from_syscall:
961
962ret_with_reschedule:
963 getcon KCR0, r6 ! r6 contains current_thread_info
964 ld.l r6, TI_FLAGS, r7 ! r7 contains current_thread_info->flags
965
c18fe9a0 966 movi _TIF_NEED_RESCHED, r8
1da177e4
LT
967 and r8, r7, r8
968 pta work_resched, tr0
969 bne r8, ZERO, tr0
970
971 pta restore_all, tr1
972
c18fe9a0 973 movi (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r8
1da177e4
LT
974 and r8, r7, r8
975 pta work_notifysig, tr0
976 bne r8, ZERO, tr0
977
978 blink tr1, ZERO
979
980work_resched:
981 pta ret_from_syscall, tr0
982 gettr tr0, LINK
983 movi schedule, r6
984 ptabs r6, tr0
985 blink tr0, ZERO /* Call schedule(), return on top */
986
987work_notifysig:
988 gettr tr1, LINK
989
990 movi do_signal, r6
991 ptabs r6, tr0
992 or SP, ZERO, r2
993 or ZERO, ZERO, r3
994 blink tr0, LINK /* Call do_signal(regs, 0), return here */
995
996restore_all:
997 /* Do prefetches */
998
999 ld.q SP, FRAME_T(0), r6
1000 ld.q SP, FRAME_T(1), r7
1001 ld.q SP, FRAME_T(2), r8
1002 ld.q SP, FRAME_T(3), r9
1003 ptabs r6, tr0
1004 ptabs r7, tr1
1005 ptabs r8, tr2
1006 ptabs r9, tr3
1007 ld.q SP, FRAME_T(4), r6
1008 ld.q SP, FRAME_T(5), r7
1009 ld.q SP, FRAME_T(6), r8
1010 ld.q SP, FRAME_T(7), r9
1011 ptabs r6, tr4
1012 ptabs r7, tr5
1013 ptabs r8, tr6
1014 ptabs r9, tr7
1015
1016 ld.q SP, FRAME_R(0), r0
1017 ld.q SP, FRAME_R(1), r1
1018 ld.q SP, FRAME_R(2), r2
1019 ld.q SP, FRAME_R(3), r3
1020 ld.q SP, FRAME_R(4), r4
1021 ld.q SP, FRAME_R(5), r5
1022 ld.q SP, FRAME_R(6), r6
1023 ld.q SP, FRAME_R(7), r7
1024 ld.q SP, FRAME_R(8), r8
1025 ld.q SP, FRAME_R(9), r9
1026 ld.q SP, FRAME_R(10), r10
1027 ld.q SP, FRAME_R(11), r11
1028 ld.q SP, FRAME_R(12), r12
1029 ld.q SP, FRAME_R(13), r13
1030 ld.q SP, FRAME_R(14), r14
1031
1032 ld.q SP, FRAME_R(16), r16
1033 ld.q SP, FRAME_R(17), r17
1034 ld.q SP, FRAME_R(18), r18
1035 ld.q SP, FRAME_R(19), r19
1036 ld.q SP, FRAME_R(20), r20
1037 ld.q SP, FRAME_R(21), r21
1038 ld.q SP, FRAME_R(22), r22
1039 ld.q SP, FRAME_R(23), r23
1040 ld.q SP, FRAME_R(24), r24
1041 ld.q SP, FRAME_R(25), r25
1042 ld.q SP, FRAME_R(26), r26
1043 ld.q SP, FRAME_R(27), r27
1044 ld.q SP, FRAME_R(28), r28
1045 ld.q SP, FRAME_R(29), r29
1046 ld.q SP, FRAME_R(30), r30
1047 ld.q SP, FRAME_R(31), r31
1048 ld.q SP, FRAME_R(32), r32
1049 ld.q SP, FRAME_R(33), r33
1050 ld.q SP, FRAME_R(34), r34
1051 ld.q SP, FRAME_R(35), r35
1052 ld.q SP, FRAME_R(36), r36
1053 ld.q SP, FRAME_R(37), r37
1054 ld.q SP, FRAME_R(38), r38
1055 ld.q SP, FRAME_R(39), r39
1056 ld.q SP, FRAME_R(40), r40
1057 ld.q SP, FRAME_R(41), r41
1058 ld.q SP, FRAME_R(42), r42
1059 ld.q SP, FRAME_R(43), r43
1060 ld.q SP, FRAME_R(44), r44
1061 ld.q SP, FRAME_R(45), r45
1062 ld.q SP, FRAME_R(46), r46
1063 ld.q SP, FRAME_R(47), r47
1064 ld.q SP, FRAME_R(48), r48
1065 ld.q SP, FRAME_R(49), r49
1066 ld.q SP, FRAME_R(50), r50
1067 ld.q SP, FRAME_R(51), r51
1068 ld.q SP, FRAME_R(52), r52
1069 ld.q SP, FRAME_R(53), r53
1070 ld.q SP, FRAME_R(54), r54
1071 ld.q SP, FRAME_R(55), r55
1072 ld.q SP, FRAME_R(56), r56
1073 ld.q SP, FRAME_R(57), r57
1074 ld.q SP, FRAME_R(58), r58
1075
1076 getcon SR, r59
1077 movi SR_BLOCK_EXC, r60
1078 or r59, r60, r59
1079 putcon r59, SR /* SR.BL = 1, keep nesting out */
1080 ld.q SP, FRAME_S(FSSR), r61
1081 ld.q SP, FRAME_S(FSPC), r62
1082 movi SR_ASID_MASK, r60
1083 and r59, r60, r59
1084 andc r61, r60, r61 /* Clear out older ASID */
1085 or r59, r61, r61 /* Retain current ASID */
1086 putcon r61, SSR
1087 putcon r62, SPC
1088
1089 /* Ignore FSYSCALL_ID */
1090
1091 ld.q SP, FRAME_R(59), r59
1092 ld.q SP, FRAME_R(60), r60
1093 ld.q SP, FRAME_R(61), r61
1094 ld.q SP, FRAME_R(62), r62
1095
1096 /* Last touch */
1097 ld.q SP, FRAME_R(15), SP
1098 rte
1099 nop
1100
1101/*
1102 * Third level handlers for VBR-based exceptions. Adapting args to
1103 * and/or deflecting to fourth level handlers.
1104 *
1105 * Fourth level handlers interface.
1106 * Most are C-coded handlers directly pointed by the trap_jtable.
1107 * (Third = Fourth level)
1108 * Inputs:
1109 * (r2) fault/interrupt code, entry number (e.g. NMI = 14,
1110 * IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...)
1111 * (r3) struct pt_regs *, original register's frame pointer
1112 * (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault)
1113 * (r5) TRA control register (for syscall/debug benefit only)
1114 * (LINK) return address
1115 * (SP) = r3
1116 *
1117 * Kernel TLB fault handlers will get a slightly different interface.
1118 * (r2) struct pt_regs *, original register's frame pointer
1119 * (r3) writeaccess, whether it's a store fault as opposed to load fault
1120 * (r4) execaccess, whether it's a ITLB fault as opposed to DTLB fault
1121 * (r5) Effective Address of fault
1122 * (LINK) return address
1123 * (SP) = r2
1124 *
1125 * fpu_error_or_IRQ? is a helper to deflect to the right cause.
1126 *
1127 */
ccd80587 1128#ifdef CONFIG_MMU
1da177e4
LT
1129tlb_miss_load:
1130 or SP, ZERO, r2
1131 or ZERO, ZERO, r3 /* Read */
1132 or ZERO, ZERO, r4 /* Data */
1133 getcon TEA, r5
1134 pta call_do_page_fault, tr0
1135 beq ZERO, ZERO, tr0
1136
1137tlb_miss_store:
1138 or SP, ZERO, r2
1139 movi 1, r3 /* Write */
1140 or ZERO, ZERO, r4 /* Data */
1141 getcon TEA, r5
1142 pta call_do_page_fault, tr0
1143 beq ZERO, ZERO, tr0
1144
1145itlb_miss_or_IRQ:
1146 pta its_IRQ, tr0
1147 beqi/u r4, EVENT_INTERRUPT, tr0
1148 or SP, ZERO, r2
1149 or ZERO, ZERO, r3 /* Read */
1150 movi 1, r4 /* Text */
1151 getcon TEA, r5
1152 /* Fall through */
1153
1154call_do_page_fault:
1155 movi do_page_fault, r6
1156 ptabs r6, tr0
1157 blink tr0, ZERO
ccd80587 1158#endif /* CONFIG_MMU */
1da177e4
LT
1159
1160fpu_error_or_IRQA:
1161 pta its_IRQ, tr0
1162 beqi/l r4, EVENT_INTERRUPT, tr0
1163#ifdef CONFIG_SH_FPU
1164 movi do_fpu_state_restore, r6
1165#else
1166 movi do_exception_error, r6
1167#endif
1168 ptabs r6, tr0
1169 blink tr0, ZERO
1170
1171fpu_error_or_IRQB:
1172 pta its_IRQ, tr0
1173 beqi/l r4, EVENT_INTERRUPT, tr0
1174#ifdef CONFIG_SH_FPU
1175 movi do_fpu_state_restore, r6
1176#else
1177 movi do_exception_error, r6
1178#endif
1179 ptabs r6, tr0
1180 blink tr0, ZERO
1181
1182its_IRQ:
1183 movi do_IRQ, r6
1184 ptabs r6, tr0
1185 blink tr0, ZERO
1186
1187/*
1188 * system_call/unknown_trap third level handler:
1189 *
1190 * Inputs:
1191 * (r2) fault/interrupt code, entry number (TRAP = 11)
1192 * (r3) struct pt_regs *, original register's frame pointer
1193 * (r4) Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault)
1194 * (r5) TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr)
1195 * (SP) = r3
1196 * (LINK) return address: ret_from_exception
1197 * (*r3) Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7)
1198 *
1199 * Outputs:
1200 * (*r3) Syscall reply (Saved r2)
1201 * (LINK) In case of syscall only it can be scrapped.
1202 * Common second level post handler will be ret_from_syscall.
1203 * Common (non-trace) exit point to that is syscall_ret (saving
1204 * result to r2). Common bad exit point is syscall_bad (returning
1205 * ENOSYS then saved to r2).
1206 *
1207 */
1208
1209unknown_trap:
1210 /* Unknown Trap or User Trace */
1211 movi do_unknown_trapa, r6
1212 ptabs r6, tr0
1213 ld.q r3, FRAME_R(9), r2 /* r2 = #arg << 16 | syscall # */
1214 andi r2, 0x1ff, r2 /* r2 = syscall # */
1215 blink tr0, LINK
1216
1217 pta syscall_ret, tr0
1218 blink tr0, ZERO
1219
1220 /* New syscall implementation*/
1221system_call:
1222 pta unknown_trap, tr0
1223 or r5, ZERO, r4 /* TRA (=r5) -> r4 */
1224 shlri r4, 20, r4
1225 bnei r4, 1, tr0 /* unknown_trap if not 0x1yzzzz */
1226
1227 /* It's a system call */
1228 st.q r3, FRAME_S(FSYSCALL_ID), r5 /* ID (0x1yzzzz) -> stack */
1229 andi r5, 0x1ff, r5 /* syscall # -> r5 */
1230
1231 STI()
1232
1233 pta syscall_allowed, tr0
1234 movi NR_syscalls - 1, r4 /* Last valid */
1235 bgeu/l r4, r5, tr0
1236
1237syscall_bad:
1238 /* Return ENOSYS ! */
1239 movi -(ENOSYS), r2 /* Fall-through */
1240
1241 .global syscall_ret
1242syscall_ret:
1243 st.q SP, FRAME_R(9), r2 /* Expecting SP back to BASIC frame */
1244
1245#ifdef CONFIG_POOR_MANS_STRACE
1246 /* nothing useful in registers at this point */
1247
1248 movi evt_debug2, r5
1249 ori r5, 1, r5
1250 ptabs r5, tr0
1251 ld.q SP, FRAME_R(9), r2
1252 or SP, ZERO, r3
1253 blink tr0, LINK
1254#endif
1255
1256 ld.q SP, FRAME_S(FSPC), r2
1257 addi r2, 4, r2 /* Move PC, being pre-execution event */
1258 st.q SP, FRAME_S(FSPC), r2
1259 pta ret_from_syscall, tr0
1260 blink tr0, ZERO
1261
1262
1263/* A different return path for ret_from_fork, because we now need
1264 * to call schedule_tail with the later kernels. Because prev is
1265 * loaded into r2 by switch_to() means we can just call it straight away
1266 */
1267
1268.global ret_from_fork
1269ret_from_fork:
1270
1271 movi schedule_tail,r5
1272 ori r5, 1, r5
1273 ptabs r5, tr0
1274 blink tr0, LINK
1275
1276#ifdef CONFIG_POOR_MANS_STRACE
1277 /* nothing useful in registers at this point */
1278
1279 movi evt_debug2, r5
1280 ori r5, 1, r5
1281 ptabs r5, tr0
1282 ld.q SP, FRAME_R(9), r2
1283 or SP, ZERO, r3
1284 blink tr0, LINK
1285#endif
1286
1287 ld.q SP, FRAME_S(FSPC), r2
1288 addi r2, 4, r2 /* Move PC, being pre-execution event */
1289 st.q SP, FRAME_S(FSPC), r2
1290 pta ret_from_syscall, tr0
1291 blink tr0, ZERO
1292
1293
1294
1295syscall_allowed:
1296 /* Use LINK to deflect the exit point, default is syscall_ret */
1297 pta syscall_ret, tr0
1298 gettr tr0, LINK
1299 pta syscall_notrace, tr0
1300
1301 getcon KCR0, r2
1302 ld.l r2, TI_FLAGS, r4
4b27c47c 1303 movi (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | _TIF_SYSCALL_AUDIT), r6
1da177e4
LT
1304 and r6, r4, r6
1305 beq/l r6, ZERO, tr0
1306
1307 /* Trace it by calling syscall_trace before and after */
1308 movi syscall_trace, r4
4b27c47c
PM
1309 or SP, ZERO, r2
1310 or ZERO, ZERO, r3
1da177e4
LT
1311 ptabs r4, tr0
1312 blink tr0, LINK
4b27c47c 1313
1da177e4
LT
1314 /* Reload syscall number as r5 is trashed by syscall_trace */
1315 ld.q SP, FRAME_S(FSYSCALL_ID), r5
1316 andi r5, 0x1ff, r5
1317
1318 pta syscall_ret_trace, tr0
1319 gettr tr0, LINK
1320
1321syscall_notrace:
1322 /* Now point to the appropriate 4th level syscall handler */
1323 movi sys_call_table, r4
1324 shlli r5, 2, r5
1325 ldx.l r4, r5, r5
1326 ptabs r5, tr0
1327
1328 /* Prepare original args */
1329 ld.q SP, FRAME_R(2), r2
1330 ld.q SP, FRAME_R(3), r3
1331 ld.q SP, FRAME_R(4), r4
1332 ld.q SP, FRAME_R(5), r5
1333 ld.q SP, FRAME_R(6), r6
1334 ld.q SP, FRAME_R(7), r7
1335
1336 /* And now the trick for those syscalls requiring regs * ! */
1337 or SP, ZERO, r8
1338
1339 /* Call it */
1340 blink tr0, ZERO /* LINK is already properly set */
1341
1342syscall_ret_trace:
1343 /* We get back here only if under trace */
1344 st.q SP, FRAME_R(9), r2 /* Save return value */
1345
1346 movi syscall_trace, LINK
4b27c47c
PM
1347 or SP, ZERO, r2
1348 movi 1, r3
1da177e4
LT
1349 ptabs LINK, tr0
1350 blink tr0, LINK
1351
1352 /* This needs to be done after any syscall tracing */
1353 ld.q SP, FRAME_S(FSPC), r2
1354 addi r2, 4, r2 /* Move PC, being pre-execution event */
1355 st.q SP, FRAME_S(FSPC), r2
1356
1357 pta ret_from_syscall, tr0
1358 blink tr0, ZERO /* Resume normal return sequence */
1359
1360/*
1361 * --- Switch to running under a particular ASID and return the previous ASID value
1362 * --- The caller is assumed to have done a cli before calling this.
1363 *
1364 * Input r2 : new ASID
1365 * Output r2 : old ASID
1366 */
1367
1368 .global switch_and_save_asid
1369switch_and_save_asid:
1370 getcon sr, r0
1371 movi 255, r4
1372 shlli r4, 16, r4 /* r4 = mask to select ASID */
1373 and r0, r4, r3 /* r3 = shifted old ASID */
1374 andi r2, 255, r2 /* mask down new ASID */
1375 shlli r2, 16, r2 /* align new ASID against SR.ASID */
1376 andc r0, r4, r0 /* efface old ASID from SR */
1377 or r0, r2, r0 /* insert the new ASID */
1378 putcon r0, ssr
1379 movi 1f, r0
1380 putcon r0, spc
1381 rte
1382 nop
13831:
1384 ptabs LINK, tr0
1385 shlri r3, 16, r2 /* r2 = old ASID */
1386 blink tr0, r63
1387
1388 .global route_to_panic_handler
1389route_to_panic_handler:
1390 /* Switch to real mode, goto panic_handler, don't return. Useful for
1391 last-chance debugging, e.g. if no output wants to go to the console.
1392 */
1393
36763b22 1394 movi panic_handler - CONFIG_PAGE_OFFSET, r1
1da177e4
LT
1395 ptabs r1, tr0
1396 pta 1f, tr1
1397 gettr tr1, r0
1398 putcon r0, spc
1399 getcon sr, r0
1400 movi 1, r1
1401 shlli r1, 31, r1
1402 andc r0, r1, r0
1403 putcon r0, ssr
1404 rte
1405 nop
14061: /* Now in real mode */
1407 blink tr0, r63
1408 nop
1409
1410 .global peek_real_address_q
1411peek_real_address_q:
1412 /* Two args:
1413 r2 : real mode address to peek
1414 r2(out) : result quadword
1415
1416 This is provided as a cheapskate way of manipulating device
1417 registers for debugging (to avoid the need to onchip_remap the debug
1418 module, and to avoid the need to onchip_remap the watchpoint
1419 controller in a way that identity maps sufficient bits to avoid the
1420 SH5-101 cut2 silicon defect).
1421
1422 This code is not performance critical
1423 */
1424
1425 add.l r2, r63, r2 /* sign extend address */
1426 getcon sr, r0 /* r0 = saved original SR */
1427 movi 1, r1
1428 shlli r1, 28, r1
1429 or r0, r1, r1 /* r0 with block bit set */
1430 putcon r1, sr /* now in critical section */
1431 movi 1, r36
1432 shlli r36, 31, r36
1433 andc r1, r36, r1 /* turn sr.mmu off in real mode section */
1434
1435 putcon r1, ssr
36763b22 1436 movi .peek0 - CONFIG_PAGE_OFFSET, r36 /* real mode target address */
1da177e4
LT
1437 movi 1f, r37 /* virtual mode return addr */
1438 putcon r36, spc
1439
1440 synco
1441 rte
1442 nop
1443
1444.peek0: /* come here in real mode, don't touch caches!!
1445 still in critical section (sr.bl==1) */
1446 putcon r0, ssr
1447 putcon r37, spc
1448 /* Here's the actual peek. If the address is bad, all bets are now off
1449 * what will happen (handlers invoked in real-mode = bad news) */
1450 ld.q r2, 0, r2
1451 synco
1452 rte /* Back to virtual mode */
1453 nop
1454
14551:
1456 ptabs LINK, tr0
1457 blink tr0, r63
1458
1459 .global poke_real_address_q
1460poke_real_address_q:
1461 /* Two args:
1462 r2 : real mode address to poke
1463 r3 : quadword value to write.
1464
1465 This is provided as a cheapskate way of manipulating device
1466 registers for debugging (to avoid the need to onchip_remap the debug
1467 module, and to avoid the need to onchip_remap the watchpoint
1468 controller in a way that identity maps sufficient bits to avoid the
1469 SH5-101 cut2 silicon defect).
1470
1471 This code is not performance critical
1472 */
1473
1474 add.l r2, r63, r2 /* sign extend address */
1475 getcon sr, r0 /* r0 = saved original SR */
1476 movi 1, r1
1477 shlli r1, 28, r1
1478 or r0, r1, r1 /* r0 with block bit set */
1479 putcon r1, sr /* now in critical section */
1480 movi 1, r36
1481 shlli r36, 31, r36
1482 andc r1, r36, r1 /* turn sr.mmu off in real mode section */
1483
1484 putcon r1, ssr
36763b22 1485 movi .poke0-CONFIG_PAGE_OFFSET, r36 /* real mode target address */
1da177e4
LT
1486 movi 1f, r37 /* virtual mode return addr */
1487 putcon r36, spc
1488
1489 synco
1490 rte
1491 nop
1492
1493.poke0: /* come here in real mode, don't touch caches!!
1494 still in critical section (sr.bl==1) */
1495 putcon r0, ssr
1496 putcon r37, spc
1497 /* Here's the actual poke. If the address is bad, all bets are now off
1498 * what will happen (handlers invoked in real-mode = bad news) */
1499 st.q r2, 0, r3
1500 synco
1501 rte /* Back to virtual mode */
1502 nop
1503
15041:
1505 ptabs LINK, tr0
1506 blink tr0, r63
1507
ccd80587 1508#ifdef CONFIG_MMU
1da177e4
LT
1509/*
1510 * --- User Access Handling Section
1511 */
1512
1513/*
1514 * User Access support. It all moved to non inlined Assembler
1515 * functions in here.
1516 *
1517 * __kernel_size_t __copy_user(void *__to, const void *__from,
1518 * __kernel_size_t __n)
1519 *
1520 * Inputs:
1521 * (r2) target address
1522 * (r3) source address
1523 * (r4) size in bytes
1524 *
1525 * Ouputs:
1526 * (*r2) target data
1527 * (r2) non-copied bytes
1528 *
1529 * If a fault occurs on the user pointer, bail out early and return the
1530 * number of bytes not copied in r2.
1531 * Strategy : for large blocks, call a real memcpy function which can
1532 * move >1 byte at a time using unaligned ld/st instructions, and can
1533 * manipulate the cache using prefetch + alloco to improve the speed
1534 * further. If a fault occurs in that function, just revert to the
1535 * byte-by-byte approach used for small blocks; this is rare so the
1536 * performance hit for that case does not matter.
1537 *
1538 * For small blocks it's not worth the overhead of setting up and calling
1539 * the memcpy routine; do the copy a byte at a time.
1540 *
1541 */
1542 .global __copy_user
1543__copy_user:
1544 pta __copy_user_byte_by_byte, tr1
1545 movi 16, r0 ! this value is a best guess, should tune it by benchmarking
1546 bge/u r0, r4, tr1
1547 pta copy_user_memcpy, tr0
1548 addi SP, -32, SP
1549 /* Save arguments in case we have to fix-up unhandled page fault */
1550 st.q SP, 0, r2
1551 st.q SP, 8, r3
1552 st.q SP, 16, r4
1553 st.q SP, 24, r35 ! r35 is callee-save
1554 /* Save LINK in a register to reduce RTS time later (otherwise
1555 ld SP,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */
1556 ori LINK, 0, r35
1557 blink tr0, LINK
1558
1559 /* Copy completed normally if we get back here */
1560 ptabs r35, tr0
1561 ld.q SP, 24, r35
1562 /* don't restore r2-r4, pointless */
1563 /* set result=r2 to zero as the copy must have succeeded. */
1564 or r63, r63, r2
1565 addi SP, 32, SP
1566 blink tr0, r63 ! RTS
1567
1568 .global __copy_user_fixup
1569__copy_user_fixup:
1570 /* Restore stack frame */
1571 ori r35, 0, LINK
1572 ld.q SP, 24, r35
1573 ld.q SP, 16, r4
1574 ld.q SP, 8, r3
1575 ld.q SP, 0, r2
1576 addi SP, 32, SP
1577 /* Fall through to original code, in the 'same' state we entered with */
1578
1579/* The slow byte-by-byte method is used if the fast copy traps due to a bad
1580 user address. In that rare case, the speed drop can be tolerated. */
1581__copy_user_byte_by_byte:
1582 pta ___copy_user_exit, tr1
1583 pta ___copy_user1, tr0
1584 beq/u r4, r63, tr1 /* early exit for zero length copy */
1585 sub r2, r3, r0
1586 addi r0, -1, r0
1587
1588___copy_user1:
1589 ld.b r3, 0, r5 /* Fault address 1 */
1590
1591 /* Could rewrite this to use just 1 add, but the second comes 'free'
1592 due to load latency */
1593 addi r3, 1, r3
1594 addi r4, -1, r4 /* No real fixup required */
1595___copy_user2:
1596 stx.b r3, r0, r5 /* Fault address 2 */
1597 bne r4, ZERO, tr0
1598
1599___copy_user_exit:
1600 or r4, ZERO, r2
1601 ptabs LINK, tr0
1602 blink tr0, ZERO
1603
1604/*
1605 * __kernel_size_t __clear_user(void *addr, __kernel_size_t size)
1606 *
1607 * Inputs:
1608 * (r2) target address
1609 * (r3) size in bytes
1610 *
1611 * Ouputs:
1612 * (*r2) zero-ed target data
1613 * (r2) non-zero-ed bytes
1614 */
1615 .global __clear_user
1616__clear_user:
1617 pta ___clear_user_exit, tr1
1618 pta ___clear_user1, tr0
1619 beq/u r3, r63, tr1
1620
1621___clear_user1:
1622 st.b r2, 0, ZERO /* Fault address */
1623 addi r2, 1, r2
1624 addi r3, -1, r3 /* No real fixup required */
1625 bne r3, ZERO, tr0
1626
1627___clear_user_exit:
1628 or r3, ZERO, r2
1629 ptabs LINK, tr0
1630 blink tr0, ZERO
1631
ccd80587 1632#endif /* CONFIG_MMU */
1da177e4
LT
1633
1634/*
1635 * int __strncpy_from_user(unsigned long __dest, unsigned long __src,
1636 * int __count)
1637 *
1638 * Inputs:
1639 * (r2) target address
1640 * (r3) source address
1641 * (r4) maximum size in bytes
1642 *
1643 * Ouputs:
1644 * (*r2) copied data
1645 * (r2) -EFAULT (in case of faulting)
1646 * copied data (otherwise)
1647 */
1648 .global __strncpy_from_user
1649__strncpy_from_user:
1650 pta ___strncpy_from_user1, tr0
1651 pta ___strncpy_from_user_done, tr1
1652 or r4, ZERO, r5 /* r5 = original count */
1653 beq/u r4, r63, tr1 /* early exit if r4==0 */
1654 movi -(EFAULT), r6 /* r6 = reply, no real fixup */
1655 or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
1656
1657___strncpy_from_user1:
1658 ld.b r3, 0, r7 /* Fault address: only in reading */
1659 st.b r2, 0, r7
1660 addi r2, 1, r2
1661 addi r3, 1, r3
1662 beq/u ZERO, r7, tr1
1663 addi r4, -1, r4 /* return real number of copied bytes */
1664 bne/l ZERO, r4, tr0
1665
1666___strncpy_from_user_done:
1667 sub r5, r4, r6 /* If done, return copied */
1668
1669___strncpy_from_user_exit:
1670 or r6, ZERO, r2
1671 ptabs LINK, tr0
1672 blink tr0, ZERO
1673
1674/*
1675 * extern long __strnlen_user(const char *__s, long __n)
1676 *
1677 * Inputs:
1678 * (r2) source address
1679 * (r3) source size in bytes
1680 *
1681 * Ouputs:
1682 * (r2) -EFAULT (in case of faulting)
1683 * string length (otherwise)
1684 */
1685 .global __strnlen_user
1686__strnlen_user:
1687 pta ___strnlen_user_set_reply, tr0
1688 pta ___strnlen_user1, tr1
1689 or ZERO, ZERO, r5 /* r5 = counter */
1690 movi -(EFAULT), r6 /* r6 = reply, no real fixup */
1691 or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
1692 beq r3, ZERO, tr0
1693
1694___strnlen_user1:
1695 ldx.b r2, r5, r7 /* Fault address: only in reading */
1696 addi r3, -1, r3 /* No real fixup */
1697 addi r5, 1, r5
1698 beq r3, ZERO, tr0
1699 bne r7, ZERO, tr1
1700! The line below used to be active. This meant led to a junk byte lying between each pair
1701! of entries in the argv & envp structures in memory. Whilst the program saw the right data
1702! via the argv and envp arguments to main, it meant the 'flat' representation visible through
1703! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example.
1704! addi r5, 1, r5 /* Include '\0' */
1705
1706___strnlen_user_set_reply:
1707 or r5, ZERO, r6 /* If done, return counter */
1708
1709___strnlen_user_exit:
1710 or r6, ZERO, r2
1711 ptabs LINK, tr0
1712 blink tr0, ZERO
1713
1714/*
1715 * extern long __get_user_asm_?(void *val, long addr)
1716 *
1717 * Inputs:
1718 * (r2) dest address
1719 * (r3) source address (in User Space)
1720 *
1721 * Ouputs:
1722 * (r2) -EFAULT (faulting)
1723 * 0 (not faulting)
1724 */
1725 .global __get_user_asm_b
1726__get_user_asm_b:
1727 or r2, ZERO, r4
1728 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1729
1730___get_user_asm_b1:
1731 ld.b r3, 0, r5 /* r5 = data */
1732 st.b r4, 0, r5
1733 or ZERO, ZERO, r2
1734
1735___get_user_asm_b_exit:
1736 ptabs LINK, tr0
1737 blink tr0, ZERO
1738
1739
1740 .global __get_user_asm_w
1741__get_user_asm_w:
1742 or r2, ZERO, r4
1743 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1744
1745___get_user_asm_w1:
1746 ld.w r3, 0, r5 /* r5 = data */
1747 st.w r4, 0, r5
1748 or ZERO, ZERO, r2
1749
1750___get_user_asm_w_exit:
1751 ptabs LINK, tr0
1752 blink tr0, ZERO
1753
1754
1755 .global __get_user_asm_l
1756__get_user_asm_l:
1757 or r2, ZERO, r4
1758 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1759
1760___get_user_asm_l1:
1761 ld.l r3, 0, r5 /* r5 = data */
1762 st.l r4, 0, r5
1763 or ZERO, ZERO, r2
1764
1765___get_user_asm_l_exit:
1766 ptabs LINK, tr0
1767 blink tr0, ZERO
1768
1769
1770 .global __get_user_asm_q
1771__get_user_asm_q:
1772 or r2, ZERO, r4
1773 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1774
1775___get_user_asm_q1:
1776 ld.q r3, 0, r5 /* r5 = data */
1777 st.q r4, 0, r5
1778 or ZERO, ZERO, r2
1779
1780___get_user_asm_q_exit:
1781 ptabs LINK, tr0
1782 blink tr0, ZERO
1783
1784/*
1785 * extern long __put_user_asm_?(void *pval, long addr)
1786 *
1787 * Inputs:
1788 * (r2) kernel pointer to value
1789 * (r3) dest address (in User Space)
1790 *
1791 * Ouputs:
1792 * (r2) -EFAULT (faulting)
1793 * 0 (not faulting)
1794 */
1795 .global __put_user_asm_b
1796__put_user_asm_b:
1797 ld.b r2, 0, r4 /* r4 = data */
1798 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1799
1800___put_user_asm_b1:
1801 st.b r3, 0, r4
1802 or ZERO, ZERO, r2
1803
1804___put_user_asm_b_exit:
1805 ptabs LINK, tr0
1806 blink tr0, ZERO
1807
1808
1809 .global __put_user_asm_w
1810__put_user_asm_w:
1811 ld.w r2, 0, r4 /* r4 = data */
1812 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1813
1814___put_user_asm_w1:
1815 st.w r3, 0, r4
1816 or ZERO, ZERO, r2
1817
1818___put_user_asm_w_exit:
1819 ptabs LINK, tr0
1820 blink tr0, ZERO
1821
1822
1823 .global __put_user_asm_l
1824__put_user_asm_l:
1825 ld.l r2, 0, r4 /* r4 = data */
1826 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1827
1828___put_user_asm_l1:
1829 st.l r3, 0, r4
1830 or ZERO, ZERO, r2
1831
1832___put_user_asm_l_exit:
1833 ptabs LINK, tr0
1834 blink tr0, ZERO
1835
1836
1837 .global __put_user_asm_q
1838__put_user_asm_q:
1839 ld.q r2, 0, r4 /* r4 = data */
1840 movi -(EFAULT), r2 /* r2 = reply, no real fixup */
1841
1842___put_user_asm_q1:
1843 st.q r3, 0, r4
1844 or ZERO, ZERO, r2
1845
1846___put_user_asm_q_exit:
1847 ptabs LINK, tr0
1848 blink tr0, ZERO
1849
1850panic_stash_regs:
1851 /* The idea is : when we get an unhandled panic, we dump the registers
1852 to a known memory location, the just sit in a tight loop.
1853 This allows the human to look at the memory region through the GDB
1854 session (assuming the debug module's SHwy initiator isn't locked up
1855 or anything), to hopefully analyze the cause of the panic. */
1856
1857 /* On entry, former r15 (SP) is in DCR
1858 former r0 is at resvec_saved_area + 0
1859 former r1 is at resvec_saved_area + 8
1860 former tr0 is at resvec_saved_area + 32
1861 DCR is the only register whose value is lost altogether.
1862 */
1863
1864 movi 0xffffffff80000000, r0 ! phy of dump area
1865 ld.q SP, 0x000, r1 ! former r0
1866 st.q r0, 0x000, r1
1867 ld.q SP, 0x008, r1 ! former r1
1868 st.q r0, 0x008, r1
1869 st.q r0, 0x010, r2
1870 st.q r0, 0x018, r3
1871 st.q r0, 0x020, r4
1872 st.q r0, 0x028, r5
1873 st.q r0, 0x030, r6
1874 st.q r0, 0x038, r7
1875 st.q r0, 0x040, r8
1876 st.q r0, 0x048, r9
1877 st.q r0, 0x050, r10
1878 st.q r0, 0x058, r11
1879 st.q r0, 0x060, r12
1880 st.q r0, 0x068, r13
1881 st.q r0, 0x070, r14
1882 getcon dcr, r14
1883 st.q r0, 0x078, r14
1884 st.q r0, 0x080, r16
1885 st.q r0, 0x088, r17
1886 st.q r0, 0x090, r18
1887 st.q r0, 0x098, r19
1888 st.q r0, 0x0a0, r20
1889 st.q r0, 0x0a8, r21
1890 st.q r0, 0x0b0, r22
1891 st.q r0, 0x0b8, r23
1892 st.q r0, 0x0c0, r24
1893 st.q r0, 0x0c8, r25
1894 st.q r0, 0x0d0, r26
1895 st.q r0, 0x0d8, r27
1896 st.q r0, 0x0e0, r28
1897 st.q r0, 0x0e8, r29
1898 st.q r0, 0x0f0, r30
1899 st.q r0, 0x0f8, r31
1900 st.q r0, 0x100, r32
1901 st.q r0, 0x108, r33
1902 st.q r0, 0x110, r34
1903 st.q r0, 0x118, r35
1904 st.q r0, 0x120, r36
1905 st.q r0, 0x128, r37
1906 st.q r0, 0x130, r38
1907 st.q r0, 0x138, r39
1908 st.q r0, 0x140, r40
1909 st.q r0, 0x148, r41
1910 st.q r0, 0x150, r42
1911 st.q r0, 0x158, r43
1912 st.q r0, 0x160, r44
1913 st.q r0, 0x168, r45
1914 st.q r0, 0x170, r46
1915 st.q r0, 0x178, r47
1916 st.q r0, 0x180, r48
1917 st.q r0, 0x188, r49
1918 st.q r0, 0x190, r50
1919 st.q r0, 0x198, r51
1920 st.q r0, 0x1a0, r52
1921 st.q r0, 0x1a8, r53
1922 st.q r0, 0x1b0, r54
1923 st.q r0, 0x1b8, r55
1924 st.q r0, 0x1c0, r56
1925 st.q r0, 0x1c8, r57
1926 st.q r0, 0x1d0, r58
1927 st.q r0, 0x1d8, r59
1928 st.q r0, 0x1e0, r60
1929 st.q r0, 0x1e8, r61
1930 st.q r0, 0x1f0, r62
1931 st.q r0, 0x1f8, r63 ! bogus, but for consistency's sake...
1932
1933 ld.q SP, 0x020, r1 ! former tr0
1934 st.q r0, 0x200, r1
1935 gettr tr1, r1
1936 st.q r0, 0x208, r1
1937 gettr tr2, r1
1938 st.q r0, 0x210, r1
1939 gettr tr3, r1
1940 st.q r0, 0x218, r1
1941 gettr tr4, r1
1942 st.q r0, 0x220, r1
1943 gettr tr5, r1
1944 st.q r0, 0x228, r1
1945 gettr tr6, r1
1946 st.q r0, 0x230, r1
1947 gettr tr7, r1
1948 st.q r0, 0x238, r1
1949
1950 getcon sr, r1
1951 getcon ssr, r2
1952 getcon pssr, r3
1953 getcon spc, r4
1954 getcon pspc, r5
1955 getcon intevt, r6
1956 getcon expevt, r7
1957 getcon pexpevt, r8
1958 getcon tra, r9
1959 getcon tea, r10
1960 getcon kcr0, r11
1961 getcon kcr1, r12
1962 getcon vbr, r13
1963 getcon resvec, r14
1964
1965 st.q r0, 0x240, r1
1966 st.q r0, 0x248, r2
1967 st.q r0, 0x250, r3
1968 st.q r0, 0x258, r4
1969 st.q r0, 0x260, r5
1970 st.q r0, 0x268, r6
1971 st.q r0, 0x270, r7
1972 st.q r0, 0x278, r8
1973 st.q r0, 0x280, r9
1974 st.q r0, 0x288, r10
1975 st.q r0, 0x290, r11
1976 st.q r0, 0x298, r12
1977 st.q r0, 0x2a0, r13
1978 st.q r0, 0x2a8, r14
1979
1980 getcon SPC,r2
1981 getcon SSR,r3
1982 getcon EXPEVT,r4
1983 /* Prepare to jump to C - physical address */
36763b22 1984 movi panic_handler-CONFIG_PAGE_OFFSET, r1
1da177e4
LT
1985 ori r1, 1, r1
1986 ptabs r1, tr0
1987 getcon DCR, SP
1988 blink tr0, ZERO
1989 nop
1990 nop
1991 nop
1992 nop
1993
1994
1995
1996
1997/*
1998 * --- Signal Handling Section
1999 */
2000
2001/*
2002 * extern long long _sa_default_rt_restorer
2003 * extern long long _sa_default_restorer
2004 *
2005 * or, better,
2006 *
2007 * extern void _sa_default_rt_restorer(void)
2008 * extern void _sa_default_restorer(void)
2009 *
2010 * Code prototypes to do a sys_rt_sigreturn() or sys_sysreturn()
2011 * from user space. Copied into user space by signal management.
2012 * Both must be quad aligned and 2 quad long (4 instructions).
2013 *
2014 */
2015 .balign 8
2016 .global sa_default_rt_restorer
2017sa_default_rt_restorer:
2018 movi 0x10, r9
2019 shori __NR_rt_sigreturn, r9
2020 trapa r9
2021 nop
2022
2023 .balign 8
2024 .global sa_default_restorer
2025sa_default_restorer:
2026 movi 0x10, r9
2027 shori __NR_sigreturn, r9
2028 trapa r9
2029 nop
2030
2031/*
2032 * --- __ex_table Section
2033 */
2034
2035/*
2036 * User Access Exception Table.
2037 */
2038 .section __ex_table, "a"
2039
2040 .global asm_uaccess_start /* Just a marker */
2041asm_uaccess_start:
2042
ccd80587 2043#ifdef CONFIG_MMU
1da177e4
LT
2044 .long ___copy_user1, ___copy_user_exit
2045 .long ___copy_user2, ___copy_user_exit
2046 .long ___clear_user1, ___clear_user_exit
ccd80587 2047#endif
1da177e4
LT
2048 .long ___strncpy_from_user1, ___strncpy_from_user_exit
2049 .long ___strnlen_user1, ___strnlen_user_exit
2050 .long ___get_user_asm_b1, ___get_user_asm_b_exit
2051 .long ___get_user_asm_w1, ___get_user_asm_w_exit
2052 .long ___get_user_asm_l1, ___get_user_asm_l_exit
2053 .long ___get_user_asm_q1, ___get_user_asm_q_exit
2054 .long ___put_user_asm_b1, ___put_user_asm_b_exit
2055 .long ___put_user_asm_w1, ___put_user_asm_w_exit
2056 .long ___put_user_asm_l1, ___put_user_asm_l_exit
2057 .long ___put_user_asm_q1, ___put_user_asm_q_exit
2058
2059 .global asm_uaccess_end /* Just a marker */
2060asm_uaccess_end:
2061
2062
2063
2064
2065/*
2066 * --- .text.init Section
2067 */
2068
2069 .section .text.init, "ax"
2070
2071/*
2072 * void trap_init (void)
2073 *
2074 */
2075 .global trap_init
2076trap_init:
2077 addi SP, -24, SP /* Room to save r28/r29/r30 */
2078 st.q SP, 0, r28
2079 st.q SP, 8, r29
2080 st.q SP, 16, r30
2081
2082 /* Set VBR and RESVEC */
2083 movi LVBR_block, r19
2084 andi r19, -4, r19 /* reset MMUOFF + reserved */
2085 /* For RESVEC exceptions we force the MMU off, which means we need the
2086 physical address. */
36763b22 2087 movi LRESVEC_block-CONFIG_PAGE_OFFSET, r20
1da177e4
LT
2088 andi r20, -4, r20 /* reset reserved */
2089 ori r20, 1, r20 /* set MMUOFF */
2090 putcon r19, VBR
2091 putcon r20, RESVEC
2092
2093 /* Sanity check */
2094 movi LVBR_block_end, r21
2095 andi r21, -4, r21
2096 movi BLOCK_SIZE, r29 /* r29 = expected size */
2097 or r19, ZERO, r30
2098 add r19, r29, r19
2099
2100 /*
2101 * Ugly, but better loop forever now than crash afterwards.
2102 * We should print a message, but if we touch LVBR or
2103 * LRESVEC blocks we should not be surprised if we get stuck
2104 * in trap_init().
2105 */
2106 pta trap_init_loop, tr1
2107 gettr tr1, r28 /* r28 = trap_init_loop */
2108 sub r21, r30, r30 /* r30 = actual size */
2109
2110 /*
2111 * VBR/RESVEC handlers overlap by being bigger than
2112 * allowed. Very bad. Just loop forever.
2113 * (r28) panic/loop address
2114 * (r29) expected size
2115 * (r30) actual size
2116 */
2117trap_init_loop:
2118 bne r19, r21, tr1
2119
2120 /* Now that exception vectors are set up reset SR.BL */
2121 getcon SR, r22
2122 movi SR_UNBLOCK_EXC, r23
2123 and r22, r23, r22
2124 putcon r22, SR
2125
2126 addi SP, 24, SP
2127 ptabs LINK, tr0
2128 blink tr0, ZERO
2129