Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* $Id: entry.S,v 1.37 2004/06/11 13:02:46 doyu Exp $ |
2 | * | |
3 | * linux/arch/sh/entry.S | |
4 | * | |
5 | * Copyright (C) 1999, 2000, 2002 Niibe Yutaka | |
6 | * Copyright (C) 2003 Paul Mundt | |
7 | * | |
8 | * This file is subject to the terms and conditions of the GNU General Public | |
9 | * License. See the file "COPYING" in the main directory of this archive | |
10 | * for more details. | |
11 | * | |
12 | */ | |
13 | ||
14 | #include <linux/sys.h> | |
15 | #include <linux/linkage.h> | |
1da177e4 LT |
16 | #include <asm/asm-offsets.h> |
17 | #include <asm/thread_info.h> | |
091904ae | 18 | #include <asm/cpu/mmu_context.h> |
1da177e4 LT |
19 | #include <asm/unistd.h> |
20 | ||
1da177e4 LT |
21 | ! NOTE: |
22 | ! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address | |
23 | ! to be jumped is too far, but it causes illegal slot exception. | |
24 | ||
25 | /* | |
26 | * entry.S contains the system-call and fault low-level handling routines. | |
27 | * This also contains the timer-interrupt handler, as well as all interrupts | |
28 | * and faults that can result in a task-switch. | |
29 | * | |
30 | * NOTE: This code handles signal-recognition, which happens every time | |
31 | * after a timer-interrupt and after each system call. | |
32 | * | |
33 | * NOTE: This code uses a convention that instructions in the delay slot | |
34 | * of a transfer-control instruction are indented by an extra space, thus: | |
35 | * | |
36 | * jmp @k0 ! control-transfer instruction | |
37 | * ldc k1, ssr ! delay slot | |
38 | * | |
39 | * Stack layout in 'ret_from_syscall': | |
40 | * ptrace needs to have all regs on the stack. | |
41 | * if the order here is changed, it needs to be | |
42 | * updated in ptrace.c and ptrace.h | |
43 | * | |
44 | * r0 | |
45 | * ... | |
46 | * r15 = stack pointer | |
47 | * spc | |
48 | * pr | |
49 | * ssr | |
50 | * gbr | |
51 | * mach | |
52 | * macl | |
53 | * syscall # | |
54 | * | |
55 | */ | |
56 | ||
! Error numbers returned directly from this file as -ENOSYS/-EINVAL;
! they must match the C errno headers -- TODO confirm they stay in sync.
57 | ENOSYS = 38 | |
58 | EINVAL = 22 | |
59 | ||
1da177e4 LT |
60 | #if defined(CONFIG_KGDB_NMI) |
61 | NMI_VEC = 0x1c0 ! Must catch early for debounce | |
62 | #endif | |
63 | ||
64 | /* Offsets to the stack */ | |
65 | OFF_R0 = 0 /* Return value. New ABI also arg4 */ | |
66 | OFF_R1 = 4 /* New ABI: arg5 */ | |
67 | OFF_R2 = 8 /* New ABI: arg6 */ | |
68 | OFF_R3 = 12 /* New ABI: syscall_nr */ | |
69 | OFF_R4 = 16 /* New ABI: arg0 */ | |
70 | OFF_R5 = 20 /* New ABI: arg1 */ | |
71 | OFF_R6 = 24 /* New ABI: arg2 */ | |
72 | OFF_R7 = 28 /* New ABI: arg3 */ | |
73 | OFF_SP = (15*4) | |
74 | OFF_PC = (16*4) | |
75 | OFF_SR = (16*4+8) | |
76 | OFF_TRA = (16*4+6*4) | |
77 | ||
78 | ||
! k0-k4 are the bank-1 shadow copies of r0-r4 while SR.RB=1; the aliases
! below let the save/restore code name them explicitly.
79 | #define k0 r0 | |
80 | #define k1 r1 | |
81 | #define k2 r2 | |
82 | #define k3 r3 | |
83 | #define k4 r4 | |
84 | ||
85 | #define k_ex_code r2_bank /* r2_bank1 */ | |
86 | #define g_imask r6 /* r6_bank1 */ | |
87 | #define k_g_imask r6_bank /* r6_bank1 */ | |
88 | #define current r7 /* r7_bank1 */ | |
89 | ||
90 | /* | |
91 | * Kernel mode register usage: | |
92 | * k0 scratch | |
93 | * k1 scratch | |
94 | * k2 scratch (Exception code) | |
95 | * k3 scratch (Return address) | |
96 | * k4 scratch | |
97 | * k5 reserved | |
98 | * k6 Global Interrupt Mask (0--15 << 4) | |
99 | * k7 CURRENT_THREAD_INFO (pointer to current thread info) | |
100 | */ | |
101 | ||
102 | ! | |
103 | ! TLB Miss / Initial Page write exception handling | |
104 | ! _and_ | |
105 | ! TLB hits, but the access violate the protection. | |
106 | ! It can be valid access, such as stack grow and/or C-O-W. | |
107 | ! | |
108 | ! | |
109 | ! Find the pmd/pte entry and loadtlb | |
110 | ! If it's not found, cause address error (SEGV) | |
111 | ! | |
112 | ! Although this could be written in assembly language (and it'd be faster), | |
113 | ! this first version depends *much* on C implementation. | |
114 | ! | |
115 | ||
! CLI(): set SR.IMASK to 0xf (mask all interrupt levels); clobbers r0.
116 | #define CLI() \ | |
117 | stc sr, r0; \ | |
118 | or #0xf0, r0; \ | |
119 | ldc r0, sr | |
120 | ||
! STI(): clear the SR.IMASK field (via __INV_IMASK) and OR in the saved
! global interrupt mask from k_g_imask (r6_bank); clobbers r10 and r11.
121 | #define STI() \ | |
122 | mov.l __INV_IMASK, r11; \ | |
123 | stc sr, r10; \ | |
124 | and r11, r10; \ | |
125 | stc k_g_imask, r11; \ | |
126 | or r11, r10; \ | |
127 | ldc r10, sr | |
128 | ||
! Without CONFIG_PREEMPT a return to kernel mode goes straight to
! restore_all (resume_kernel is aliased to it) and preempt_stop() is a no-op.
129 | #if defined(CONFIG_PREEMPT) | |
130 | # define preempt_stop() CLI() | |
131 | #else | |
132 | # define preempt_stop() | |
133 | # define resume_kernel restore_all | |
134 | #endif | |
135 | ||
136 | #if defined(CONFIG_MMU) | |
! Entry stubs for the data-fault vectors.  Each stub only sets
! r5 = writeaccess (0 = read, 1 = write) in the bra delay slot and
! branches to the shared call_dpf path below.
137 | .align 2 | |
138 | ENTRY(tlb_miss_load) | |
139 | bra call_dpf | |
140 | mov #0, r5 | |
141 | ||
142 | .align 2 | |
143 | ENTRY(tlb_miss_store) | |
144 | bra call_dpf | |
145 | mov #1, r5 | |
146 | ||
147 | .align 2 | |
148 | ENTRY(initial_page_write) | |
149 | bra call_dpf | |
150 | mov #1, r5 | |
151 | ||
152 | .align 2 | |
153 | ENTRY(tlb_protection_violation_load) | |
154 | bra call_dpf | |
155 | mov #0, r5 | |
156 | ||
157 | .align 2 | |
158 | ENTRY(tlb_protection_violation_store) | |
159 | bra call_dpf | |
160 | mov #1, r5 | |
161 | ||
! call_dpf: shared data-page-fault handling.
!   r4 = saved register frame (r15), r5 = writeaccess,
!   r6 = faulting address read from MMU_TEA.
! __do_page_fault (2f) is tried first; r8/r9 preserve writeaccess/address
! across that call and r10 preserves pr.  A zero return means the fault
! was handled and we rts straight back; otherwise interrupts are
! re-enabled and the full C do_page_fault() (3f) is tail-called.
162 | call_dpf: | |
163 | mov.l 1f, r0 | |
164 | mov r5, r8 | |
165 | mov.l @r0, r6 | |
166 | mov r6, r9 | |
167 | mov.l 2f, r0 | |
168 | sts pr, r10 | |
169 | jsr @r0 | |
170 | mov r15, r4 | |
171 | ! | |
172 | tst r0, r0 | |
173 | bf/s 0f | |
174 | lds r10, pr | |
175 | rts | |
176 | nop | |
177 | 0: STI() | |
178 | mov.l 3f, r0 | |
179 | mov r9, r6 | |
180 | mov r8, r5 | |
181 | jmp @r0 | |
182 | mov r15, r4 | |
183 | ||
184 | .align 2 | |
185 | 1: .long MMU_TEA | |
186 | 2: .long __do_page_fault | |
187 | 3: .long do_page_fault | |
188 | ||
! Address-error vectors: same stub pattern, shared tail in call_dae.
189 | .align 2 | |
190 | ENTRY(address_error_load) | |
191 | bra call_dae | |
192 | mov #0,r5 ! writeaccess = 0 | |
193 | ||
194 | .align 2 | |
195 | ENTRY(address_error_store) | |
196 | bra call_dae | |
197 | mov #1,r5 ! writeaccess = 1 | |
198 | ||
! call_dae: tail-call do_address_error(regs=r4, writeaccess=r5,
! address=r6) with the faulting address taken from MMU_TEA.
199 | .align 2 | |
200 | call_dae: | |
201 | mov.l 1f, r0 | |
202 | mov.l @r0, r6 ! address | |
203 | mov.l 2f, r0 | |
204 | jmp @r0 | |
205 | mov r15, r4 ! regs | |
206 | ||
207 | .align 2 | |
208 | 1: .long MMU_TEA | |
209 | 2: .long do_address_error | |
210 | #endif /* CONFIG_MMU */ | |
211 | ||
212 | #if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB) | |
213 | ! Handle kernel debug if either kgdb (SW) or gdb-stub (FW) is present. | |
214 | ! If both are configured, handle the debug traps (breakpoints) in SW, | |
215 | ! but still allow BIOS traps to FW. | |
216 | ||
! debug_kernel: kernel-mode debug trap dispatcher.  With both stubs
! built in, trap number 0x3f (TRA >> 2 == 0x3f; TRA was left in r8 by
! debug_trap) is forced to the firmware; everything else goes to kgdb.
217 | .align 2 | |
218 | debug_kernel: | |
219 | #if defined(CONFIG_SH_STANDARD_BIOS) && defined(CONFIG_SH_KGDB) | |
220 | /* Force BIOS call to FW (debug_trap put TRA in r8) */ | |
221 | mov r8,r0 | |
222 | shlr2 r0 | |
223 | cmp/eq #0x3f,r0 | |
224 | bt debug_kernel_fw | |
225 | #endif /* CONFIG_SH_STANDARD_BIOS && CONFIG_SH_KGDB */ | |
226 | ||
227 | debug_enter: | |
228 | #if defined(CONFIG_SH_KGDB) | |
229 | /* Jump to kgdb, pass stacked regs as arg */ | |
230 | debug_kernel_sw: | |
231 | mov.l 3f, r0 | |
232 | jmp @r0 | |
233 | mov r15, r4 | |
234 | .align 2 | |
235 | 3: .long kgdb_handle_exception | |
236 | #endif /* CONFIG_SH_KGDB */ | |
237 | ||
238 | #if defined(CONFIG_SH_STANDARD_BIOS) | |
239 | /* Unwind the stack and jmp to the debug entry */ | |
! NOTE(review): the whole exception frame is popped back into live
! registers here -- presumably the firmware stub expects the interrupted
! context rather than a pt_regs pointer; confirm against the BIOS ABI.
! The bank switch (ldc r8, sr with RB=1/BL=1) happens mid-unwind, after
! r0-r7 but before r8-r14, so the pops land in the correct banks.
240 | debug_kernel_fw: | |
241 | mov.l @r15+, r0 | |
242 | mov.l @r15+, r1 | |
243 | mov.l @r15+, r2 | |
244 | mov.l @r15+, r3 | |
245 | mov.l @r15+, r4 | |
246 | mov.l @r15+, r5 | |
247 | mov.l @r15+, r6 | |
248 | mov.l @r15+, r7 | |
249 | stc sr, r8 | |
250 | mov.l 1f, r9 ! BL =1, RB=1, IMASK=0x0F | |
251 | or r9, r8 | |
252 | ldc r8, sr ! here, change the register bank | |
253 | mov.l @r15+, r8 | |
254 | mov.l @r15+, r9 | |
255 | mov.l @r15+, r10 | |
256 | mov.l @r15+, r11 | |
257 | mov.l @r15+, r12 | |
258 | mov.l @r15+, r13 | |
259 | mov.l @r15+, r14 | |
260 | mov.l @r15+, k0 | |
261 | ldc.l @r15+, spc | |
262 | lds.l @r15+, pr | |
263 | mov.l @r15+, k1 | |
264 | ldc.l @r15+, gbr | |
265 | lds.l @r15+, mach | |
266 | lds.l @r15+, macl | |
267 | mov k0, r15 | |
268 | ! | |
! k1 holds the saved SR; prime SSR with it and jump through the
! firmware vector stored at gdb_vbr_vector (ldc executes in the delay slot).
269 | mov.l 2f, k0 | |
270 | mov.l @k0, k0 | |
271 | jmp @k0 | |
272 | ldc k1, ssr | |
273 | .align 2 | |
274 | 1: .long 0x300000f0 | |
275 | 2: .long gdb_vbr_vector | |
276 | #endif /* CONFIG_SH_STANDARD_BIOS */ | |
277 | ||
278 | #endif /* CONFIG_SH_STANDARD_BIOS || CONFIG_SH_KGDB */ | |
279 | ||
280 | ||
! debug_trap: reached from system_call for trap numbers >= 0x20 (TRA in
! r8).  If the saved SR shows kernel mode (MD bit shifted into T by the
! two shll's), hand off to debug_kernel; otherwise fall through to the
! user-mode software breakpoint handler via 1f.
! NOTE(review): the delay slot of bt/s is the "mov.l @r15, r0" below the
! #endif, so r0 is reloaded from the frame on both paths -- confirm
! debug_kernel relies on that.
281 | .align 2 | |
282 | debug_trap: | |
283 | #if defined(CONFIG_SH_STANDARD_BIOS) || defined(CONFIG_SH_KGDB) | |
284 | mov #OFF_SR, r0 | |
285 | mov.l @(r0,r15), r0 ! get status register | |
286 | shll r0 | |
287 | shll r0 ! kernel space? | |
288 | bt/s debug_kernel | |
289 | #endif | |
290 | mov.l @r15, r0 ! Restore R0 value | |
291 | mov.l 1f, r8 | |
292 | jmp @r8 | |
293 | nop | |
294 | ||
! exception_error: unexpected exception code; re-enable interrupts and
! hand off to the C handler do_exception_error().
295 | .align 2 | |
296 | ENTRY(exception_error) | |
297 | ! | |
298 | STI() | |
299 | mov.l 2f, r0 | |
300 | jmp @r0 | |
301 | nop | |
302 | ||
303 | ! | |
304 | .align 2 | |
305 | 1: .long break_point_trap_software | |
306 | 2: .long do_exception_error | |
307 | ||
! Common return paths.  The saved SR's MD bit (bit 30) is shifted into T
! by two shll's: T set means the exception/interrupt came from kernel
! mode, so take the resume_kernel path; otherwise resume_userspace.
308 | .align 2 | |
309 | ret_from_exception: | |
310 | preempt_stop() | |
3aa770e7 | 311 | ENTRY(ret_from_irq) |
1da177e4 LT |
312 | ! |
313 | mov #OFF_SR, r0 | |
314 | mov.l @(r0,r15), r0 ! get status register | |
315 | shll r0 | |
316 | shll r0 ! kernel space? | |
317 | bt/s resume_kernel ! Yes, it's from kernel, go back soon | |
318 | GET_THREAD_INFO(r8) | |
319 | ||
! Kernel preemption: reschedule only when preempt_count is zero, the
! need_resched flag is set, and the saved IMASK is not already 0xf
! (i.e. we did not interrupt an interrupts-off region).  PREEMPT_ACTIVE
! is written into preempt_count around the schedule() call so the
! scheduler treats this as a preemption, then the flags are re-checked.
320 | #ifdef CONFIG_PREEMPT | |
321 | bra resume_userspace | |
322 | nop | |
323 | ENTRY(resume_kernel) | |
324 | mov.l @(TI_PRE_COUNT,r8), r0 ! current_thread_info->preempt_count | |
325 | tst r0, r0 | |
326 | bf noresched | |
327 | need_resched: | |
328 | mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags | |
329 | tst #_TIF_NEED_RESCHED, r0 ! need_resched set? | |
330 | bt noresched | |
331 | ||
332 | mov #OFF_SR, r0 | |
333 | mov.l @(r0,r15), r0 ! get status register | |
334 | and #0xf0, r0 ! interrupts off (exception path)? | |
335 | cmp/eq #0xf0, r0 | |
336 | bt noresched | |
337 | ||
338 | mov.l 1f, r0 | |
339 | mov.l r0, @(TI_PRE_COUNT,r8) | |
340 | ||
341 | STI() | |
342 | mov.l 2f, r0 | |
343 | jsr @r0 | |
344 | nop | |
345 | mov #0, r0 | |
346 | mov.l r0, @(TI_PRE_COUNT,r8) | |
347 | CLI() | |
348 | ||
349 | bra need_resched | |
350 | nop | |
351 | noresched: | |
352 | bra restore_all | |
353 | nop | |
354 | ||
355 | .align 2 | |
356 | 1: .long PREEMPT_ACTIVE | |
357 | 2: .long schedule | |
358 | #endif | |
359 | ||
! resume_userspace: with interrupts blocked, check the thread work bits
! (_TIF_WORK_MASK); if none remain, fall through to restore_all.
360 | ENTRY(resume_userspace) | |
361 | ! r8: current_thread_info | |
362 | CLI() | |
363 | mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags | |
364 | tst #_TIF_WORK_MASK, r0 | |
365 | bt/s restore_all | |
366 | tst #_TIF_NEED_RESCHED, r0 | |
367 | ||
368 | .align 2 | |
369 | work_pending: | |
370 | ! r0: current_thread_info->flags | |
371 | ! r8: current_thread_info | |
372 | ! t: result of "tst #_TIF_NEED_RESCHED, r0" | |
373 | bf/s work_resched | |
9f23e7e9 | 374 | tst #(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK), r0 |
1da177e4 LT |
! work_notifysig tail-calls do_notify_resume(regs=r4, save_r0=r5,
! flags=r6) with the return address preset to restore_all (mova + lds
! into pr), so the C handler "returns" straight into the frame restore.
! NOTE(review): r12 is assumed to still hold the r0 value saved right
! after syscall dispatch -- verify it survives every path reaching here.
375 | work_notifysig: |
376 | bt/s restore_all | |
377 | mov r15, r4 | |
9f23e7e9 PM |
378 | mov r12, r5 ! set arg1(save_r0) |
379 | mov r0, r6 | |
1da177e4 LT |
380 | mov.l 2f, r1 |
381 | mova restore_all, r0 | |
382 | jmp @r1 | |
383 | lds r0, pr | |
384 | work_resched: | |
385 | #ifndef CONFIG_PREEMPT | |
! gUSA: if the saved user SP is >= 0xc0000000 (both top bits set, tested
! by the two shll/bf pairs) the task was inside a gUSA atomic sequence;
! roll its PC back to the rewind point before calling schedule() so the
! sequence restarts atomically after the reschedule.
386 | ! gUSA handling | |
387 | mov.l @(OFF_SP,r15), r0 ! get user space stack pointer | |
388 | mov r0, r1 | |
389 | shll r0 | |
390 | bf/s 1f | |
391 | shll r0 | |
392 | bf/s 1f | |
393 | mov #OFF_PC, r0 | |
394 | ! SP >= 0xc0000000 : gUSA mark | |
395 | mov.l @(r0,r15), r2 ! get user space PC (program counter) | |
396 | mov.l @(OFF_R0,r15), r3 ! end point | |
397 | cmp/hs r3, r2 ! r2 >= r3? | |
398 | bt 1f | |
399 | add r3, r1 ! rewind point #2 | |
400 | mov.l r1, @(r0,r15) ! reset PC to rewind point #2 | |
401 | ! | |
402 | 1: | |
403 | #endif | |
404 | mov.l 1f, r1 | |
405 | jsr @r1 ! schedule | |
406 | nop | |
407 | CLI() | |
408 | ! | |
! Re-check the work bits after schedule() returns; loop until clear.
409 | mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags | |
410 | tst #_TIF_WORK_MASK, r0 | |
411 | bt restore_all | |
412 | bra work_pending | |
413 | tst #_TIF_NEED_RESCHED, r0 | |
414 | ||
415 | .align 2 | |
416 | 1: .long schedule | |
9f23e7e9 | 417 | 2: .long do_notify_resume |
1da177e4 LT |
418 | |
! syscall_exit_work: _TIF_SYSCALL_TRACE set -> notify the tracer again on
! syscall exit via do_syscall_trace(); any other pending work bit is
! handled by the common work_pending path instead.
419 | .align 2 | |
420 | syscall_exit_work: | |
421 | ! r0: current_thread_info->flags | |
422 | ! r8: current_thread_info | |
423 | tst #_TIF_SYSCALL_TRACE, r0 | |
424 | bt/s work_pending | |
425 | tst #_TIF_NEED_RESCHED, r0 | |
426 | STI() | |
427 | ! XXX setup arguments... | |
428 | mov.l 4f, r0 ! do_syscall_trace | |
429 | jsr @r0 | |
430 | nop | |
431 | bra resume_userspace | |
432 | nop | |
433 | ||
! syscall_trace_entry: tracer hook taken from system_call before dispatch.
! NOTE(review): syscall_call dispatches via r9 (copied from r3 in
! system_call, *before* this hook ran); the r3 reloaded below is used
! only for the range check, so a tracer rewriting the syscall number on
! the stack may not change which handler actually runs -- worth verifying.
434 | .align 2 | |
435 | syscall_trace_entry: | |
436 | ! Yes it is traced. | |
437 | ! XXX setup arguments... | |
438 | mov.l 4f, r11 ! Call do_syscall_trace which notifies | |
439 | jsr @r11 ! superior (will chomp R[0-7]) | |
440 | nop | |
441 | ! Reload R0-R4 from kernel stack, where the | |
442 | ! parent may have modified them using | |
443 | ! ptrace(POKEUSR). (Note that R0-R2 are | |
444 | ! used by the system call handler directly | |
445 | ! from the kernel stack anyway, so don't need | |
446 | ! to be reloaded here.) This allows the parent | |
447 | ! to rewrite system calls and args on the fly. | |
448 | mov.l @(OFF_R4,r15), r4 ! arg0 | |
449 | mov.l @(OFF_R5,r15), r5 | |
450 | mov.l @(OFF_R6,r15), r6 | |
451 | mov.l @(OFF_R7,r15), r7 ! arg3 | |
452 | mov.l @(OFF_R3,r15), r3 ! syscall_nr | |
453 | ! Arrange for do_syscall_trace to be called | |
454 | ! again as the system call returns. | |
455 | mov.l 2f, r10 ! Number of syscalls | |
456 | cmp/hs r10, r3 | |
457 | bf syscall_call | |
! Out-of-range syscall number after tracing: fail with -ENOSYS (the
! store to OFF_R0 executes in the bra delay slot).
458 | mov #-ENOSYS, r0 | |
459 | bra syscall_exit | |
460 | mov.l r0, @(OFF_R0,r15) ! Return value | |
461 | ||
462 | /* | |
463 | * Syscall interface: | |
464 | * | |
465 | * Syscall #: R3 | |
466 | * Arguments #0 to #3: R4--R7 | |
467 | * Arguments #4 to #6: R0, R1, R2 | |
468 | * TRA: (number of arguments + 0x10) x 4 | |
469 | * | |
470 | * This code also handles delegating other traps to the BIOS/gdb stub | |
471 | * according to: | |
472 | * | |
473 | * Trap number | |
474 | * (TRA>>2) Purpose | |
475 | * -------- ------- | |
476 | * 0x0-0xf old syscall ABI | |
477 | * 0x10-0x1f new syscall ABI | |
478 | * 0x20-0xff delegated through debug_trap to BIOS/gdb stub. | |
479 | * | |
480 | * Note: When we're first called, the TRA value must be shifted | |
481 | * right 2 bits in order to get the value that was used as the "trapa" | |
482 | * argument. | |
483 | */ | |
484 | ||
! ret_from_fork: first return of a newly created task.  Calls
! schedule_tail() with r0 as its argument (moved to r4 in the jsr delay
! slot; presumably the previous task left there by the context switch --
! confirm against switch_to), then leaves through the normal syscall
! exit path so the child returns to user space like any syscall.
485 | .align 2 | |
486 | .globl ret_from_fork | |
487 | ret_from_fork: | |
488 | mov.l 1f, r8 | |
489 | jsr @r8 | |
490 | mov r0, r4 | |
491 | bra syscall_exit | |
492 | nop | |
493 | .align 2 | |
494 | 1: .long schedule_tail | |
495 | ! | |
! system_call: TRAPA entry point.
!   r3 = syscall number, r4-r7 = args 0-3, r0-r2 = args 4-6 (see the
!   "Syscall interface" comment above).
! Trap arguments >= 0x20 (raw TRA >= 0x80) are not syscalls and are
! delegated to debug_trap (BIOS/gdb stub) with TRA still in r8.
496 | ENTRY(system_call) | |
497 | mov.l 1f, r9 | |
498 | mov.l @r9, r8 ! Read from TRA (Trap Address) Register | |
499 | ! | |
500 | ! Is the trap argument >= 0x20? (TRA will be >= 0x80) | |
501 | mov #0x7f, r9 | |
502 | cmp/hi r9, r8 | |
503 | bt/s 0f | |
504 | mov #OFF_TRA, r9 | |
505 | add r15, r9 | |
506 | ! | |
507 | mov.l r8, @r9 ! set TRA value to tra | |
508 | STI() | |
509 | ! Call the system call handler through the table. | |
510 | ! First check for bad syscall number | |
511 | mov r3, r9 | |
512 | mov.l 2f, r8 ! Number of syscalls | |
513 | cmp/hs r8, r9 | |
514 | bf/s good_system_call | |
515 | GET_THREAD_INFO(r8) | |
516 | syscall_badsys: ! Bad syscall number | |
517 | mov #-ENOSYS, r0 | |
518 | bra resume_userspace | |
519 | mov.l r0, @(OFF_R0,r15) ! Return value | |
520 | ! | |
521 | 0: | |
522 | bra debug_trap | |
523 | nop | |
524 | ! | |
525 | good_system_call: ! Good syscall number | |
526 | mov.l @(TI_FLAGS,r8), r8 | |
527 | mov #_TIF_SYSCALL_TRACE, r10 | |
528 | tst r10, r8 | |
529 | bf syscall_trace_entry | |
530 | ! | |
! syscall_call: index sys_call_table with r9 = syscall_nr * 4 and jump
! to the handler; on return, stash the pre-call r0 in r12 (used later as
! save_r0 by work_notifysig) and store the return value into the frame.
531 | syscall_call: | |
532 | shll2 r9 ! x4 | |
533 | mov.l 3f, r8 ! Load the address of sys_call_table | |
534 | add r8, r9 | |
535 | mov.l @r9, r8 | |
536 | jsr @r8 ! jump to specific syscall handler | |
537 | nop | |
0b892935 | 538 | mov.l @(OFF_R0,r15), r12 ! save r0 |
1da177e4 LT |
539 | mov.l r0, @(OFF_R0,r15) ! save the return value |
540 | ! | |
541 | syscall_exit: | |
542 | CLI() | |
543 | ! | |
544 | GET_THREAD_INFO(r8) | |
545 | mov.l @(TI_FLAGS,r8), r0 ! current_thread_info->flags | |
546 | tst #_TIF_ALLWORK_MASK, r0 | |
547 | bf syscall_exit_work | |
! restore_all: unwind the full exception frame and rte back to the
! interrupted context.  After the mid-sequence bank switch (7f: BL=1,
! RB=1), the bank-1 locals are: k4 = original stack pointer,
! k3 = original SR, k2 = scratch for the rebuilt SSR.
548 | restore_all: | |
549 | mov.l @r15+, r0 | |
550 | mov.l @r15+, r1 | |
551 | mov.l @r15+, r2 | |
552 | mov.l @r15+, r3 | |
553 | mov.l @r15+, r4 | |
554 | mov.l @r15+, r5 | |
555 | mov.l @r15+, r6 | |
556 | mov.l @r15+, r7 | |
557 | ! | |
558 | stc sr, r8 | |
559 | mov.l 7f, r9 | |
560 | or r9, r8 ! BL =1, RB=1 | |
561 | ldc r8, sr ! here, change the register bank | |
562 | ! | |
563 | mov.l @r15+, r8 | |
564 | mov.l @r15+, r9 | |
565 | mov.l @r15+, r10 | |
566 | mov.l @r15+, r11 | |
567 | mov.l @r15+, r12 | |
568 | mov.l @r15+, r13 | |
569 | mov.l @r15+, r14 | |
570 | mov.l @r15+, k4 ! original stack pointer | |
571 | ldc.l @r15+, spc | |
572 | lds.l @r15+, pr | |
573 | mov.l @r15+, k3 ! original SR | |
574 | ldc.l @r15+, gbr | |
575 | lds.l @r15+, mach | |
576 | lds.l @r15+, macl | |
577 | add #4, r15 ! Skip syscall number | |
578 | ! | |
579 | #ifdef CONFIG_SH_DSP | |
! If the frame carries the DSP marker (5f), re-enable SR.DSP and restore
! the DSP register set pushed by handle_exception.
580 | mov.l @r15+, k0 ! DSP mode marker | |
581 | mov.l 5f, k1 | |
582 | cmp/eq k0, k1 ! Do we have a DSP stack frame? | |
583 | bf skip_restore | |
584 | ||
585 | stc sr, k0 ! Enable CPU DSP mode | |
586 | or k1, k0 ! (within kernel it may be disabled) | |
587 | ldc k0, sr | |
588 | mov r2, k0 ! Backup r2 | |
589 | ||
590 | ! Restore DSP registers from stack | |
591 | mov r15, r2 | |
592 | movs.l @r2+, a1 | |
593 | movs.l @r2+, a0g | |
594 | movs.l @r2+, a1g | |
595 | movs.l @r2+, m0 | |
596 | movs.l @r2+, m1 | |
597 | mov r2, r15 | |
598 | ||
599 | lds.l @r15+, a0 | |
600 | lds.l @r15+, x0 | |
601 | lds.l @r15+, x1 | |
602 | lds.l @r15+, y0 | |
603 | lds.l @r15+, y1 | |
604 | lds.l @r15+, dsr | |
605 | ldc.l @r15+, rs | |
606 | ldc.l @r15+, re | |
607 | ldc.l @r15+, mod | |
608 | ||
609 | mov k0, r2 ! Restore r2 | |
610 | skip_restore: | |
611 | #endif | |
612 | ! | |
! Rebuild SSR: take the saved SR masked by 9f, then supply the IMASK
! field from g_imask unless the saved IMASK was already 0xf (the
! shlr2/and #0x3c/cmp sequence tests exactly the four IMASK bits).
613 | ! Calculate new SR value | |
614 | mov k3, k2 ! original SR value | |
615 | mov.l 9f, k1 | |
616 | and k1, k2 ! Mask orignal SR value | |
617 | ! | |
618 | mov k3, k0 ! Calculate IMASK-bits | |
619 | shlr2 k0 | |
620 | and #0x3c, k0 | |
621 | cmp/eq #0x3c, k0 | |
622 | bt/s 6f | |
623 | shll2 k0 | |
624 | mov g_imask, k0 | |
625 | ! | |
626 | 6: or k0, k2 ! Set the IMASK-bits | |
627 | ldc k2, ssr | |
628 | ! | |
629 | #if defined(CONFIG_KGDB_NMI) | |
! Clear the NMI debounce flag set by the interrupt vector stub; 6f here
! resolves forward to the "6: .long in_nmi" literal in that stub.
630 | ! Clear in_nmi | |
6ae5e8d7 | 631 | mov.l 6f, k0 |
1da177e4 LT |
632 | mov #0, k1 |
633 | mov.b k1, @k0 | |
634 | #endif | |
635 | mov.l @r15+, k2 ! restore EXPEVT | |
636 | mov k4, r15 | |
637 | rte | |
638 | nop | |
639 | ||
640 | .align 2 | |
641 | 1: .long TRA | |
642 | 2: .long NR_syscalls | |
643 | 3: .long sys_call_table | |
644 | 4: .long do_syscall_trace | |
645 | 5: .long 0x00001000 ! DSP | |
646 | 7: .long 0x30000000 | |
647 | 9: | |
648 | __INV_IMASK: | |
649 | .long 0xffffff0f ! ~(IMASK) | |
650 | ||
651 | ! Exception Vector Base | |
652 | ! | |
653 | ! Should be aligned page boundary. | |
654 | ! | |
! The .balign directives place these stubs at the fixed offsets the CPU
! vectors to, relative to VBR: +0x100 general exception, +0x400 TLB
! miss, +0x600 interrupt.  Each stub loads k2 = event code
! (EXPEVT/INTEVT) and k3 = return address, then branches into
! handle_exception (the final @k2 load executes in the bra delay slot).
655 | .balign 4096,0,4096 | |
656 | ENTRY(vbr_base) | |
657 | .long 0 | |
658 | ! | |
659 | .balign 256,0,256 | |
660 | general_exception: | |
661 | mov.l 1f, k2 | |
662 | mov.l 2f, k3 | |
663 | bra handle_exception | |
664 | mov.l @k2, k2 | |
665 | .align 2 | |
666 | 1: .long EXPEVT | |
667 | 2: .long ret_from_exception | |
668 | ! | |
669 | ! | |
670 | .balign 1024,0,1024 | |
671 | tlb_miss: | |
672 | mov.l 1f, k2 | |
673 | mov.l 4f, k3 | |
674 | bra handle_exception | |
675 | mov.l @k2, k2 | |
676 | ! | |
677 | .balign 512,0,512 | |
678 | interrupt: | |
679 | mov.l 2f, k2 | |
680 | mov.l 3f, k3 | |
681 | #if defined(CONFIG_KGDB_NMI) | |
! NMI debounce: when INTEVT == NMI_VEC, tas.b atomically tests-and-sets
! in_nmi; if it was already set (T clear after tas.b) this is a nested
! NMI and it is dropped with an immediate rte.  restore_all clears the
! flag again on the way out.
682 | ! Debounce (filter nested NMI) | |
683 | mov.l @k2, k0 | |
684 | mov.l 5f, k1 | |
685 | cmp/eq k1, k0 | |
686 | bf 0f | |
687 | mov.l 6f, k1 | |
688 | tas.b @k1 | |
689 | bt 0f | |
690 | rte | |
691 | nop | |
692 | .align 2 | |
693 | 5: .long NMI_VEC | |
694 | 6: .long in_nmi | |
695 | 0: | |
696 | #endif /* defined(CONFIG_KGDB_NMI) */ | |
697 | bra handle_exception | |
698 | mov.l @k2, k2 | |
699 | ||
700 | .align 2 | |
701 | 1: .long EXPEVT | |
702 | 2: .long INTEVT | |
703 | 3: .long ret_from_irq | |
704 | 4: .long ret_from_exception | |
705 | ||
706 | ! | |
707 | ! | |
! handle_exception: common save path for every vector stub above.
! On entry (register bank 1 active): k2 = event code (EXPEVT/INTEVT),
! k3 = address the C handler should return to (installed into pr).
! Switches to the task's kernel stack when coming from user mode, builds
! the register frame, drops back to bank 0 with interrupts blocked, and
! finally dispatches through exception_handling_table indexed by
! (event code >> 3): codes step by 0x20 and table entries are 4 bytes.
708 | .align 2 | |
3aa770e7 | 709 | ENTRY(handle_exception) |
1da177e4 LT |
710 | ! Using k0, k1 for scratch registers (r0_bank1, r1_bank), |
711 | ! save all registers onto stack. | |
712 | ! | |
713 | stc ssr, k0 ! Is it from kernel space? | |
714 | shll k0 ! Check MD bit (bit30) by shifting it into... | |
715 | shll k0 ! ...the T bit | |
716 | bt/s 1f ! It's a kernel to kernel transition. | |
717 | mov r15, k0 ! save original stack to k0 | |
718 | /* User space to kernel */ | |
a6a31139 PM |
719 | mov #(THREAD_SIZE >> 8), k1 |
720 | shll8 k1 ! k1 := THREAD_SIZE | |
1da177e4 LT |
721 | add current, k1 |
722 | mov k1, r15 ! change to kernel stack | |
723 | ! | |
724 | 1: mov #-1, k4 | |
725 | mov.l 2f, k1 | |
726 | ! | |
727 | #ifdef CONFIG_SH_DSP | |
! If the interrupted context had SR.DSP set, push the DSP register set
! and a marker word (1f) so restore_all knows to undo it; otherwise push
! a zero marker.
728 | mov.l r2, @-r15 ! Save r2, we need another reg | |
729 | stc sr, k4 | |
730 | mov.l 1f, r2 | |
731 | tst r2, k4 ! Check if in DSP mode | |
732 | mov.l @r15+, r2 ! Restore r2 now | |
733 | bt/s skip_save | |
734 | mov #0, k4 ! Set marker for no stack frame | |
735 | ||
736 | mov r2, k4 ! Backup r2 (in k4) for later | |
737 | ||
738 | ! Save DSP registers on stack | |
739 | stc.l mod, @-r15 | |
740 | stc.l re, @-r15 | |
741 | stc.l rs, @-r15 | |
742 | sts.l dsr, @-r15 | |
743 | sts.l y1, @-r15 | |
744 | sts.l y0, @-r15 | |
745 | sts.l x1, @-r15 | |
746 | sts.l x0, @-r15 | |
747 | sts.l a0, @-r15 | |
748 | ||
749 | ! GAS is broken, does not generate correct "movs.l Ds,@-As" instr. | |
750 | ||
751 | ! FIXME: Make sure that this is still the case with newer toolchains, | |
752 | ! as we're not at all interested in supporting ancient toolchains at | |
753 | ! this point. -- PFM. | |
754 | ||
755 | mov r15, r2 | |
756 | .word 0xf653 ! movs.l a1, @-r2 | |
757 | .word 0xf6f3 ! movs.l a0g, @-r2 | |
758 | .word 0xf6d3 ! movs.l a1g, @-r2 | |
759 | .word 0xf6c3 ! movs.l m0, @-r2 | |
760 | .word 0xf6e3 ! movs.l m1, @-r2 | |
761 | mov r2, r15 | |
762 | ||
763 | mov k4, r2 ! Restore r2 | |
764 | mov.l 1f, k4 ! Force DSP stack frame | |
765 | skip_save: | |
766 | mov.l k4, @-r15 ! Push DSP mode marker onto stack | |
767 | #endif | |
768 | ! Save the user registers on the stack. | |
769 | mov.l k2, @-r15 ! EXPEVT | |
! k4 is -1 here unless the DSP path above reused it; this slot is later
! overwritten with the real TRA value by system_call.
770 | mov.l k4, @-r15 ! set TRA (default: -1) | |
771 | ! | |
772 | sts.l macl, @-r15 | |
773 | sts.l mach, @-r15 | |
774 | stc.l gbr, @-r15 | |
775 | stc.l ssr, @-r15 | |
776 | sts.l pr, @-r15 | |
777 | stc.l spc, @-r15 | |
778 | ! | |
779 | lds k3, pr ! Set the return address to pr | |
780 | ! | |
781 | mov.l k0, @-r15 ! save orignal stack | |
782 | mov.l r14, @-r15 | |
783 | mov.l r13, @-r15 | |
784 | mov.l r12, @-r15 | |
785 | mov.l r11, @-r15 | |
786 | mov.l r10, @-r15 | |
787 | mov.l r9, @-r15 | |
788 | mov.l r8, @-r15 | |
789 | ! | |
! Switch back to bank 0 (3f clears RB and BL) while raising IMASK/FD
! via k1 (2f), so r0-r7 saved below are the interrupted context's own.
790 | stc sr, r8 ! Back to normal register bank, and | |
791 | or k1, r8 ! Block all interrupts | |
792 | mov.l 3f, k1 | |
793 | and k1, r8 ! ... | |
794 | ldc r8, sr ! ...changed here. | |
795 | ! | |
796 | mov.l r7, @-r15 | |
797 | mov.l r6, @-r15 | |
798 | mov.l r5, @-r15 | |
799 | mov.l r4, @-r15 | |
800 | mov.l r3, @-r15 | |
801 | mov.l r2, @-r15 | |
802 | mov.l r1, @-r15 | |
803 | mov.l r0, @-r15 | |
804 | ! Then, dispatch to the handler, according to the exception code. | |
805 | stc k_ex_code, r8 | |
806 | shlr2 r8 | |
807 | shlr r8 | |
808 | mov.l 4f, r9 | |
809 | add r8, r9 | |
810 | mov.l @r9, r9 | |
811 | jmp @r9 | |
812 | nop | |
813 | ||
814 | .align 2 | |
815 | 1: .long 0x00001000 ! DSP=1 | |
816 | 2: .long 0x000080f0 ! FD=1, IMASK=15 | |
817 | 3: .long 0xcfffffff ! RB=0, BL=0 | |
818 | 4: .long exception_handling_table | |
819 | ||
! exception_none: do-nothing handler; returns immediately to the address
! handle_exception placed in pr.  Presumably installed in
! exception_handling_table for unused vector entries -- confirm where
! the table is populated.
820 | .align 2 | |
821 | ENTRY(exception_none) | |
822 | rts | |
823 | nop | |
824 |