/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>

#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK
/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif
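
/*
 * For illustration (editorial note, not used below): lis/ori builds a
 * full 32-bit constant from two 16-bit halves, e.g. to load 0x00021000:
 *	lis	r10,0x0002	->  r10 = 0x00020000
 *	ori	r10,r10,0x1000	->  r10 = 0x00021000
 * A single li only covers constants that fit in a signed 16 bits,
 * hence the two-instruction form when MSR_KERNEL >= 0x10000.
 */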

#ifdef CONFIG_BOOKE
#include "head_booke.h"
#define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level)	\
	mtspr	exc_level##_SPRG,r8;			\
	BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);		\
	lwz	r0,GPR10-INT_FRAME_SIZE(r8);		\
	stw	r0,GPR10(r11);				\
	lwz	r0,GPR11-INT_FRAME_SIZE(r8);		\
	stw	r0,GPR11(r11);				\
	mfspr	r8,exc_level##_SPRG

	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK)
	b	transfer_to_handler_full

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(DEBUG)
	b	transfer_to_handler_full

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT)
	/* fall through */
#endif

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	/* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
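
/*
 * Illustrative call-site sketch (editorial; cf. the EXC_XFER macros in
 * head_32.S): callers reach transfer_to_handler via "bl", and the two
 * words following the call are picked up through LR at label 3 below:
 *	bl	transfer_to_handler
 *	.long	some_handler		- virtual address of the handler
 *	.long	ret_from_except		- where to go when done
 * "some_handler" is a placeholder name for this sketch.
 */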
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG3
	addi	r2,r12,-THREAD
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   single-step bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IC@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif
	b	3f

2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
	lwz	r9,THREAD_INFO-THREAD(r12)
	cmplw	r1,r9			/* if r1 <= current->thread_info */
	ble-	stack_ovf		/* then the kernel stack overflowed */
5:
#ifdef CONFIG_6xx
	tophys(r9,r9)			/* check local flags */
	lwz	r12,TI_LOCAL_FLAGS(r9)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
#endif /* CONFIG_6xx */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
3:
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */

#ifdef CONFIG_6xx
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	b	power_save_6xx_restore
#endif

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r12,_end@h
	ori	r12,r12,_end@l
	cmplw	r1,r12
	ble	5b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	FIX_SRR1(r10,r12)
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI

/*
 * Handle a system call.
 */
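/*
 * For reference (editorial note), the user-side ABI this path services:
 * syscall number in r0, arguments in r3-r8, result in r3, with cr0.so
 * set on error (r3 then holds a positive errno). A minimal sketch,
 * with "error_path" a placeholder label:
 *	li	r0,1		- e.g. __NR_exit
 *	li	r3,0		- first argument
 *	sc			- trap into DoSyscall below
 *	bso-	error_path	- cr0.so set => r3 is an errno
 */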
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"entry_32.S",N_SO,0,0,0f
0:

_GLOBAL(DoSyscall)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall
#endif /* SHOW_SYSCALLS */
	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	bge-	66f
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */
	.globl	ret_from_syscall
ret_from_syscall:
#ifdef SHOW_SYSCALLS
	bl	do_show_syscall_exit
#endif
	mr	r6,r3
	rlwinm	r12,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
	/* disable interrupts so current_thread_info()->flags can't change */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	li	r8,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
	cmplw	0,r3,r8
	blt+	syscall_exit_cont
	lwz	r11,_CCR(r1)	/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)
syscall_exit_cont:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The single
	   step bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IC@h
	bnel-	load_dbcr0
#endif
#ifdef CONFIG_44x
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	2f
1:
#endif /* CONFIG_44x */
	stwcx.	r0,0,r1			/* to clear the reservation */
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	lwz	r8,_MSR(r1)
	FIX_SRR1(r8, r0)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI
#ifdef CONFIG_44x
2:	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	1b
#endif /* CONFIG_44x */

66:	li	r3,-ENOSYS
	b	ret_from_syscall

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,_TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	lwz	r0,GPR0(r1)	/* Restore original registers */
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)
	b	syscall_dotrace_cont

syscall_exit_work:
	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmplw	0,r3,r8
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	lwz	r11,_CCR(r1)	/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)

1:	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	lwarx	r8,0,r12
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r12
#endif
	stwcx.	r8,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything which requires enabling interrupts? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	ret_from_except

	/* Re-enable interrupts */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)

	/* Save NVGPRS if they're not saved already */
	lwz	r4,_TRAP(r1)
	andi.	r4,r4,1
	beq	5f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,_TRAP(r1)
5:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except_full

#ifdef SHOW_SYSCALLS
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	lis	r3,7f@ha
	addi	r3,r3,7f@l
	lwz	r4,GPR0(r1)
	lwz	r5,GPR3(r1)
	lwz	r6,GPR4(r1)
	lwz	r7,GPR5(r1)
	lwz	r8,GPR6(r1)
	lwz	r9,GPR7(r1)
	bl	printk
	lis	r3,77f@ha
	addi	r3,r3,77f@l
	lwz	r4,GPR8(r1)
	mr	r5,r2
	bl	printk
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
	lis	r11,show_syscalls_task@ha
	lwz	r11,show_syscalls_task@l(r11)
	cmp	0,r2,r11
	bnelr
#endif
	stw	r31,GPR31(r1)
	mflr	r31
	stw	r3,RESULT(r1)	/* Save result */
	mr	r4,r3
	lis	r3,79f@ha
	addi	r3,r3,79f@l
	bl	printk
	lwz	r3,RESULT(r1)
	mtlr	r31
	lwz	r31,GPR31(r1)
	blr

7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x), current=%p\n"
79:	.string	" -> %x\n"
	.align	2,0

#ifdef SHOW_SYSCALLS_TASK
	.data
	.globl	show_syscalls_task
show_syscalls_task:
	.long	-1
	.text
#endif
#endif /* SHOW_SYSCALLS */

/*
 * The fork/clone functions need to copy the full register set into
 * the child process.  Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
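/*
 * Editorial note: the low bit of the trap word in the exception frame
 * flags a partial register set (r13-r31 not yet saved); SAVE_NVGPRS
 * plus clearing that bit, as each stub below does, records that the
 * frame now holds the complete register set.
 */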
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc/kernel/process.c
 */
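/*
 * Rough C-level shape of the call (editorial; see switch_to() on
 * powerpc, names abbreviated here):
 *	last = _switch(&prev->thread, &next->thread);
 * The "mr r3,r2" below is what hands the old 'current' back as the
 * return value in the context of the new task.
 */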
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_SPE@h	/* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR	/* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	CLR_TOP32(r0)
	mtspr	SPRN_SPRG3,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0	/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0	/* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.section .bss
	.align	2
fee_restarts:
	.space	4
	.previous

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	li	r10,-1
	stw	r10,_TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception
	.long	ret_from_except
#endif

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NEED_RESCHED)
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The single
	   step bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IC@h
	bnel-	load_dbcr0
#endif

#ifdef CONFIG_PREEMPT
	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info->preempt_count */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	lwz	r0,TI_FLAGS(r9)
	andi.	r0,r0,_TIF_NEED_RESCHED
	beq+	restore
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
1:	bl	preempt_schedule_irq
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */

	/* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	beq+	1f
	li	r6,0
	iccci	r0,r0
	stw	r6,icache_44x_need_flush@l(r4)
1:
#endif /* CONFIG_44x */
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	lwz	r9,_MSR(r1)
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r9,_MSR(r1)
	lwz	r12,_NIP(r1)
	FIX_SRR1(r9,r10)
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR, we    \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	PPC405_ERR77_SYNC;						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

	.globl	ret_from_crit_exc
ret_from_crit_exc:
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)

#ifdef CONFIG_BOOKE
	.globl	ret_from_debug_exc
ret_from_debug_exc:
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 */
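/*
 * Editorial note on the layout used below: global_dbcr0 is two words;
 * word 0 holds the saved global DBCR0 value, and the word at offset 4
 * counts users. It is incremented here and decremented again on the
 * entry path in transfer_to_handler above.
 */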
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr

	.section .bss
	.align	4
global_dbcr0:
	.space	8
	.previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
2:	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	do_signal
	REST_NVGPRS(r1)
	b	recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	4f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

	.section .bss
	.align	2
ee_restarts:
	.space	4
	.previous

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
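/*
 * Editorial sketch of the call site (cf. rtas.c): enter_rtas is
 * invoked with r3 holding the physical address of the RTAS argument
 * buffer, roughly
 *	enter_rtas(__pa(rtas_args));
 * and runs the firmware entry point with MSR_IR/MSR_DR cleared.
 */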
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	LOAD_REG_ADDR(r4, rtas)
	lis	r6,1f@ha		/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys(r7,r1)
	lwz	r8,RTASENTRY(r4)
	lwz	r4,RTASBASE(r4)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	mtspr	SPRN_SPRG2,r7
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI
1:	tophys(r9,r1)
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	FIX_SRR1(r9,r0)
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	mtspr	SPRN_SPRG2,r0
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */

	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0
	/* XXX load up BATs and panic */

#endif /* CONFIG_PPC_RTAS */