/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/export.h>

/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)	li r,(x)
#endif
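/*
 * Note: li only takes a 16-bit signed immediate, so an MSR image with any
 * bit at or above 0x10000 set (such as MSR_CE on 4xx/Book-E) has to be
 * built with the lis/ori pair above.
 */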

#ifdef CONFIG_BOOKE
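/*
 * Note on the three Book-E entry points below: a critical, debug or
 * machine-check interrupt can arrive while a lower interrupt level still
 * has its save/restore SPR pair (SRR0/1, CSRR0/1, DSRR0/1) live, so each
 * entry point saves the lower levels' SPRs into the exception frame before
 * falling through to the common crit_transfer_to_handler code.  They are
 * restored on the matching ret_from_*_exc paths near the end of this file.
 */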
	.globl	mcheck_transfer_to_handler
mcheck_transfer_to_handler:
	mfspr	r0,SPRN_DSRR0
	stw	r0,_DSRR0(r11)
	mfspr	r0,SPRN_DSRR1
	stw	r0,_DSRR1(r11)
	/* fall through */

	.globl	debug_transfer_to_handler
debug_transfer_to_handler:
	mfspr	r0,SPRN_CSRR0
	stw	r0,_CSRR0(r11)
	mfspr	r0,SPRN_CSRR1
	stw	r0,_CSRR1(r11)
	/* fall through */

	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
#ifdef CONFIG_PPC_BOOK3E_MMU
	mfspr	r0,SPRN_MAS0
	stw	r0,MAS0(r11)
	mfspr	r0,SPRN_MAS1
	stw	r0,MAS1(r11)
	mfspr	r0,SPRN_MAS2
	stw	r0,MAS2(r11)
	mfspr	r0,SPRN_MAS3
	stw	r0,MAS3(r11)
	mfspr	r0,SPRN_MAS6
	stw	r0,MAS6(r11)
#ifdef CONFIG_PHYS_64BIT
	mfspr	r0,SPRN_MAS7
	stw	r0,MAS7(r11)
#endif /* CONFIG_PHYS_64BIT */
#endif /* CONFIG_PPC_BOOK3E_MMU */
#ifdef CONFIG_44x
	mfspr	r0,SPRN_MMUCR
	stw	r0,MMUCR(r11)
#endif
	mfspr	r0,SPRN_SRR0
	stw	r0,_SRR0(r11)
	mfspr	r0,SPRN_SRR1
	stw	r0,_SRR1(r11)

	/* set the stack limit to the current stack
	 * and set the limit to protect the thread_info
	 * struct
	 */
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,SAVED_KSP_LIMIT(r11)
	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

#ifdef CONFIG_40x
	.globl	crit_transfer_to_handler
crit_transfer_to_handler:
	lwz	r0,crit_r10@l(0)
	stw	r0,GPR10(r11)
	lwz	r0,crit_r11@l(0)
	stw	r0,GPR11(r11)
	mfspr	r0,SPRN_SRR0
	stw	r0,crit_srr0@l(0)
	mfspr	r0,SPRN_SRR1
	stw	r0,crit_srr1@l(0)

	/* set the stack limit to the current stack
	 * and set the limit to protect the thread_info
	 * struct
	 */
	mfspr	r8,SPRN_SPRG_THREAD
	lwz	r0,KSP_LIMIT(r8)
	stw	r0,saved_ksp_limit@l(0)
	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
	stw	r0,KSP_LIMIT(r8)
	/* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
	.globl	transfer_to_handler_full
transfer_to_handler_full:
	SAVE_NVGPRS(r11)
	/* fall through */

	.globl	transfer_to_handler
transfer_to_handler:
	stw	r2,GPR2(r11)
	stw	r12,_NIP(r11)
	stw	r9,_MSR(r11)
	andi.	r2,r9,MSR_PR
	mfctr	r12
	mfspr	r2,SPRN_XER
	stw	r12,_CTR(r11)
	stw	r2,_XER(r11)
	mfspr	r12,SPRN_SPRG_THREAD
	addi	r2,r12,-THREAD
	tovirt(r2,r2)			/* set r2 to current */
	beq	2f			/* if from user, fix up THREAD.regs */
	addi	r11,r1,STACK_FRAME_OVERHEAD
	stw	r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
	/* Check to see if the dbcr0 register is set up to debug.  Use the
	   internal debug mode bit to do this. */
	lwz	r12,THREAD_DBCR0(r12)
	andis.	r12,r12,DBCR0_IDM@h
	beq+	3f
	/* From user and task is ptraced - load up global dbcr0 */
	li	r12,-1			/* clear all pending debug events */
	mtspr	SPRN_DBSR,r12
	lis	r11,global_dbcr0@ha
	tophys(r11,r11)
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	lwz	r12,0(r11)
	mtspr	SPRN_DBCR0,r12
	lwz	r12,4(r11)
	addi	r12,r12,-1
	stw	r12,4(r11)
#endif
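	/*
	 * Note: global_dbcr0 (see the .bss definition near load_dbcr0 below)
	 * is an 8-byte slot per CPU: word 0 holds the saved global DBCR0
	 * value and word 1 a use count, incremented in load_dbcr0 and
	 * decremented just above when the saved value is written to DBCR0.
	 */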
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	CURRENT_THREAD_INFO(r9, r1)
	tophys(r9, r9)
	ACCOUNT_CPU_USER_ENTRY(r9, r11, r12)
#endif

	b	3f

2:	/* if from kernel, check interrupted DOZE/NAP mode and
	 * check for stack overflow
	 */
	lwz	r9,KSP_LIMIT(r12)
	cmplw	r1,r9			/* if r1 <= ksp_limit */
	ble-	stack_ovf		/* then the kernel stack overflowed */
5:
#if defined(CONFIG_6xx) || defined(CONFIG_E500)
	CURRENT_THREAD_INFO(r9, r1)
	tophys(r9,r9)			/* check local flags */
	lwz	r12,TI_LOCAL_FLAGS(r9)
	mtcrf	0x01,r12
	bt-	31-TLF_NAPPING,4f
	bt-	31-TLF_SLEEPING,7f
#endif /* CONFIG_6xx || CONFIG_E500 */
	.globl transfer_to_handler_cont
transfer_to_handler_cont:
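	/*
	 * The two words immediately following the caller's
	 * "bl transfer_to_handler" hold the virtual address of the exception
	 * handler and the address to return to afterwards (see the EXC_XFER
	 * macros used by the exception vectors), so LR points straight at
	 * them and they are picked up with the two loads below.
	 */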
3:
	mflr	r9
	lwz	r11,0(r9)		/* virtual address of handler */
	lwz	r9,4(r9)		/* where to go when done */
#ifdef CONFIG_PPC_8xx_PERF_EVENT
	mtspr	SPRN_NRI, r0
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	lis	r12,reenable_mmu@h
	ori	r12,r12,reenable_mmu@l
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI
reenable_mmu:				/* re-enable mmu so we can */
	mfmsr	r10
	lwz	r12,_MSR(r1)
	xor	r10,r10,r12
	andi.	r10,r10,MSR_EE		/* Did EE change? */
	beq	1f

	/*
	 * trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
	 * If from user mode there is only one stack frame on the stack, and
	 * accessing CALLER_ADDR1 will cause an oops.  So we need to create a
	 * dummy stack frame to make trace_hardirqs_off happy.
	 *
	 * This is handy because we also need to save a bunch of GPRs:
	 * r3 can be different from GPR3(r1) at this point, r9 and r11
	 * contain the old MSR and handler address respectively,
	 * r4 & r5 can contain page fault arguments that need to be passed
	 * along as well.  r12, CCR, CTR, XER etc... are left clobbered as
	 * they aren't useful past this point (aren't syscall arguments),
	 * the rest is restored from the exception frame.
	 */
	stwu	r1,-32(r1)
	stw	r9,8(r1)
	stw	r11,12(r1)
	stw	r3,16(r1)
	stw	r4,20(r1)
	stw	r5,24(r1)
	bl	trace_hardirqs_off
	lwz	r5,24(r1)
	lwz	r4,20(r1)
	lwz	r3,16(r1)
	lwz	r11,12(r1)
	lwz	r9,8(r1)
	addi	r1,r1,32
	lwz	r0,GPR0(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
1:	mtctr	r11
	mtlr	r9
	bctr				/* jump to handler */
#else /* CONFIG_TRACE_IRQFLAGS */
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r10
	mtlr	r9
	SYNC
	RFI				/* jump to handler, enable MMU */
#endif /* CONFIG_TRACE_IRQFLAGS */

#if defined (CONFIG_6xx) || defined(CONFIG_E500)
4:	rlwinm	r12,r12,0,~_TLF_NAPPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	b	power_save_ppc32_restore

7:	rlwinm	r12,r12,0,~_TLF_SLEEPING
	stw	r12,TI_LOCAL_FLAGS(r9)
	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
	rlwinm	r9,r9,0,~MSR_EE
	lwz	r12,_LINK(r11)		/* and return to address in LR */
	b	fast_exception_return
#endif

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
	/* sometimes we use a statically-allocated stack, which is OK. */
	lis	r12,_end@h
	ori	r12,r12,_end@l
	cmplw	r1,r12
	ble	5b			/* r1 <= &_end is OK */
	SAVE_NVGPRS(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	lis	r9,StackOverflow@ha
	addi	r9,r9,StackOverflow@l
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
#ifdef CONFIG_PPC_8xx_PERF_EVENT
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR0,r9
	mtspr	SPRN_SRR1,r10
	SYNC
	RFI

/*
 * Handle a system call.
 */
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"entry_32.S",N_SO,0,0,0f
0:

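/*
 * On entry r0 holds the system call number and r3-r8 hold the arguments;
 * the system call exception prolog has already built the pt_regs frame on
 * the kernel stack and switched translation on before branching here.
 */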
_GLOBAL(DoSyscall)
	stw	r3,ORIG_GPR3(r1)
	li	r12,0
	stw	r12,RESULT(r1)
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
	rlwinm	r11,r11,0,4,2
	stw	r11,_CCR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Returning from syscalls can (and generally will) hard enable
	 * interrupts.  You aren't supposed to call a syscall with
	 * interrupts disabled in the first place.  However, to ensure
	 * that we get it right vs. lockdep if it happens, we force
	 * that hard enable here with appropriate tracing if we see
	 * that we have been called with interrupts off.
	 */
	mfmsr	r11
	andi.	r12,r11,MSR_EE
	bne+	1f
	/* We came in with interrupts disabled, we enable them now */
	bl	trace_hardirqs_on
	mfmsr	r11
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	ori	r11,r11,MSR_EE
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	mtmsr	r11
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
	CURRENT_THREAD_INFO(r10, r1)
	lwz	r11,TI_FLAGS(r10)
	andi.	r11,r11,_TIF_SYSCALL_DOTRACE
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmplwi	0,r0,NR_syscalls
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	bge-	66f
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	PPC440EP_ERR42
	blrl			/* Call handler */
	.globl	ret_from_syscall
ret_from_syscall:
	mr	r6,r3
	CURRENT_THREAD_INFO(r12, r1)
	/* disable interrupts so current_thread_info()->flags can't change */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
	/* Note: We don't bother telling lockdep about it */
	SYNC
	MTMSRD(r10)
	lwz	r9,TI_FLAGS(r12)
	li	r8,-MAX_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
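	/*
	 * Return values in [-MAX_ERRNO, -1] denote an error: negate the
	 * value and set the summary-overflow (SO) bit in CR0, which is what
	 * userspace checks to decide whether to set errno.
	 */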
	cmplw	0,r3,r8
	blt+	syscall_exit_cont
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)
syscall_exit_cont:
	lwz	r8,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* If we are going to return from the syscall with interrupts
	 * off, we trace that here.  It shouldn't happen, but we want
	 * to catch the bugger if it does.
	 */
	andi.	r10,r8,MSR_EE
	bne+	1f
	stw	r3,GPR3(r1)
	bl	trace_hardirqs_off
	lwz	r3,GPR3(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* If the process has its own DBCR0 value, load it up.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	bne-	2f
1:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
#endif /* CONFIG_44x */
BEGIN_FTR_SECTION
	lwarx	r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	andi.	r4,r8,MSR_PR
	beq	3f
	CURRENT_THREAD_INFO(r4, r1)
	ACCOUNT_CPU_USER_EXIT(r4, r5, r7)
3:
#endif
	lwz	r4,_LINK(r1)
	lwz	r5,_CCR(r1)
	mtlr	r4
	mtcr	r5
	lwz	r7,_NIP(r1)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
#ifdef CONFIG_PPC_8xx_PERF_EVENT
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	SYNC
	RFI
#ifdef CONFIG_44x
2:	li	r7,0
	iccci	r0,r0
	stw	r7,icache_44x_need_flush@l(r4)
	b	1b
#endif  /* CONFIG_44x */

66:	li	r3,-ENOSYS
	b	ret_from_syscall

	.globl	ret_from_fork
ret_from_fork:
	REST_NVGPRS(r1)
	bl	schedule_tail
	li	r3,0
	b	ret_from_syscall

	.globl	ret_from_kernel_thread
ret_from_kernel_thread:
	REST_NVGPRS(r1)
	bl	schedule_tail
	mtlr	r14
	mr	r3,r15
	PPC440EP_ERR42
	blrl
	li	r3,0
	b	ret_from_syscall

/* Traced system call support */
syscall_dotrace:
	SAVE_NVGPRS(r1)
	li	r0,0xc00
	stw	r0,_TRAP(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for call number to look up in the table (r0).
	 */
	mr	r0,r3
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	REST_NVGPRS(r1)

	cmplwi	r0,NR_syscalls
	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
	bge-	ret_from_syscall
	b	syscall_dotrace_cont

syscall_exit_work:
	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmplw	0,r3,r8
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	lwz	r11,_CCR(r1)			/* Load CR */
	neg	r3,r3
	oris	r11,r11,0x1000	/* Set SO bit in CR */
	stw	r11,_CCR(r1)

1:	stw	r6,RESULT(r1)	/* Save result */
	stw	r3,GPR3(r1)	/* Update return value */
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	lwarx	r8,0,r12
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r12
#endif
	stwcx.	r8,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything which requires enabling interrupts? */
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
	beq	ret_from_except

	/* Re-enable interrupts.  There is no need to trace that with
	 * lockdep as we are supposed to have IRQs on at this point
	 */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)

	/* Save NVGPRS if they're not saved already */
	lwz	r4,_TRAP(r1)
	andi.	r4,r4,1
	beq	5f
	SAVE_NVGPRS(r1)
	li	r4,0xc00
	stw	r4,_TRAP(r1)
5:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except_full

/*
 * The fork/clone functions need to copy the full register set into
 * the child process.  Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
	.globl	ppc_fork
ppc_fork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_fork

	.globl	ppc_vfork
ppc_vfork:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_vfork

	.globl	ppc_clone
ppc_clone:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_clone

	.globl	ppc_swapcontext
ppc_swapcontext:
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
	stw	r0,_TRAP(r1)		/* register set saved */
	b	sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
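/*
 * The low bit of the value in _TRAP(r1) records that only the volatile
 * registers were saved in the frame; clearing it below (clrrwi) marks the
 * frame as holding the full register set (the FULL_REGS() convention).
 */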
	.globl	handle_page_fault
handle_page_fault:
	stw	r4,_DAR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpwi	r3,0
	beq+	ret_from_except
	SAVE_NVGPRS(r1)
	lwz	r0,_TRAP(r1)
	clrrwi	r0,r0,1
	stw	r0,_TRAP(r1)
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except_full

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
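/*
 * Note that MSR_FP (and MSR_VEC/MSR_SPE where configured) is cleared from
 * the live MSR across the switch below, so the incoming task runs with the
 * FP/vector units disabled until its first use traps to the corresponding
 * "unavailable" exception and the state is reloaded lazily.
 */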
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	/* r3-r12 are caller saved -- Cort */
	SAVE_NVGPRS(r1)
	stw	r0,_NIP(r1)	/* Return to switch caller */
	mfmsr	r11
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
	stw	r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
	stw	r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
	beq+	1f
	andc	r11,r11,r0
	MTMSRD(r11)
	isync
1:	stw	r11,_MSR(r1)
	mfcr	r10
	stw	r10,_CCR(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	tophys(r0,r4)
	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */

	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */

	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r12 are destroyed -- Cort */
	REST_NVGPRS(r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr

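/*
 * fast_exception_return: a short return path used by low-level handlers
 * that return without calling into C.  On entry r11 points at the exception
 * frame, r9 holds the MSR to restore (destined for SRR1) and r12 the NIP
 * to return to (SRR0); only the volatile registers are restored before the
 * RFI.
 */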
	.globl	fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
	beq	1f			/* if not, we've got problems */
#endif

2:	REST_4GPRS(3, r11)
	lwz	r10,_CCR(r11)
	REST_GPR(1, r11)
	mtcr	r10
	lwz	r10,_LINK(r11)
	mtlr	r10
	REST_GPR(10, r11)
#ifdef CONFIG_PPC_8xx_PERF_EVENT
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR1,r9
	mtspr	SPRN_SRR0,r12
	REST_GPR(9, r11)
	REST_GPR(12, r11)
	lwz	r11,GPR11(r11)
	SYNC
	RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:	lis	r3,exc_exit_restart_end@ha
	addi	r3,r3,exc_exit_restart_end@l
	cmplw	r12,r3
	bge	3f
	lis	r4,exc_exit_restart@ha
	addi	r4,r4,exc_exit_restart@l
	cmplw	r12,r4
	blt	3f
	lis	r3,fee_restarts@ha
	tophys(r3,r3)
	lwz	r5,fee_restarts@l(r3)
	addi	r5,r5,1
	stw	r5,fee_restarts@l(r3)
	mr	r12,r4		/* restart at exc_exit_restart */
	b	2b

	.section .bss
	.align	2
fee_restarts:
	.space	4
	.previous

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
	b	2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
	li	r10,-1
	stw	r10,_TRAP(r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lis	r10,MSR_KERNEL@h
	ori	r10,r10,MSR_KERNEL@l
	bl	transfer_to_handler_full
	.long	nonrecoverable_exception
	.long	ret_from_except
#endif

	.globl	ret_from_except_full
ret_from_except_full:
	REST_NVGPRS(r1)
	/* fall through */

	.globl	ret_from_except
ret_from_except:
	/* Hard-disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt. */
	/* Note: We don't bother telling lockdep about it */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC			/* Some chip revs have problems here... */
	MTMSRD(r10)		/* disable interrupts */

	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r0,r3,MSR_PR
	beq	resume_kernel

user_exc_return:		/* r10 contains MSR_KERNEL here */
	/* Check current_thread_info()->flags */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_USER_WORK_MASK
	bne	do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
	/* Check whether this process has its own DBCR0 value.  The internal
	   debug mode bit tells us that dbcr0 should be loaded. */
	lwz	r0,THREAD+THREAD_DBCR0(r2)
	andis.	r10,r0,DBCR0_IDM@h
	bnel-	load_dbcr0
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	CURRENT_THREAD_INFO(r9, r1)
	ACCOUNT_CPU_USER_EXIT(r9, r10, r11)
#endif

	b	restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r8,TI_FLAGS(r9)
	andis.	r0,r8,_TIF_EMULATE_STACK_STORE@h
	beq+	1f

	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	lwz	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src:  current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/4	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */
	mtctr	r5
2:	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	addi	r6,r6,4
	bdnz	2b

	/* Do real store operation to complete stwu */
	lwz	r5,GPR1(r1)
	stw	r8,0(r5)

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r9,TI_FLAGS
0:	lwarx	r8,0,r5
	andc	r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
	dcbt	0,r5
#endif
	stwcx.	r8,0,r5
	bne-	0b
1:

#ifdef CONFIG_PREEMPT
	/* check current_thread_info->preempt_count */
	lwz	r0,TI_PREEMPT(r9)
	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
	bne	restore
	andi.	r8,r8,_TIF_NEED_RESCHED
	beq+	restore
	lwz	r3,_MSR(r1)
	andi.	r0,r3,MSR_EE	/* interrupts off? */
	beq	restore		/* don't schedule if so */
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep thinks irqs are enabled, we need to call
	 * preempt_schedule_irq with IRQs off, so we inform lockdep
	 * now that we -did- turn them off already
	 */
	bl	trace_hardirqs_off
#endif
1:	bl	preempt_schedule_irq
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r3,TI_FLAGS(r9)
	andi.	r0,r3,_TIF_NEED_RESCHED
	bne-	1b
#ifdef CONFIG_TRACE_IRQFLAGS
	/* And now, to properly rebalance the above, we tell lockdep they
	 * are being turned back on, which will happen when we return
	 */
	bl	trace_hardirqs_on
#endif
#endif /* CONFIG_PREEMPT */

	/* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
	b	1f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
	lis	r4,icache_44x_need_flush@ha
	lwz	r5,icache_44x_need_flush@l(r4)
	cmplwi	cr0,r5,0
	beq+	1f
	li	r6,0
	iccci	r0,r0
	stw	r6,icache_44x_need_flush@l(r4)
1:
#endif  /* CONFIG_44x */

	lwz	r9,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
	 * off in this assembly code while peeking at TI_FLAGS() and such.  However
	 * we need to inform it if the exception turned interrupts off, and we
	 * are about to turn them back on.
	 *
	 * The problem here sadly is that we don't know whether the exception was
	 * one that turned interrupts off or not.  So we always tell lockdep about
	 * turning them on here when we go back to wherever we came from with EE
	 * on, even if that may mean some redundant calls being tracked.  Maybe later
	 * we could encode what the exception did somewhere or test the exception
	 * type in the pt_regs but that sounds overkill.
	 */
	andi.	r10,r9,MSR_EE
	beq	1f
	/*
	 * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
	 * which is the stack frame here, we need to force a stack frame
	 * in case we came from user space.
	 */
	stwu	r1,-32(r1)
	mflr	r0
	stw	r0,4(r1)
	stwu	r1,-32(r1)
	bl	trace_hardirqs_on
	lwz	r1,0(r1)
	lwz	r1,0(r1)
	lwz	r9,_MSR(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */

	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	REST_4GPRS(3, r1)
	REST_2GPRS(7, r1)

	lwz	r10,_XER(r1)
	lwz	r11,_CTR(r1)
	mtspr	SPRN_XER,r10
	mtctr	r11

	PPC405_ERR77(0,r1)
BEGIN_FTR_SECTION
	lwarx	r11,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
	stwcx.	r0,0,r1			/* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */

	lwz	r10,_CCR(r1)
	lwz	r11,_LINK(r1)
	mtcrf	0xFF,r10
	mtlr	r11

	/*
	 * Once we put values in SRR0 and SRR1, we are in a state
	 * where exceptions are not recoverable, since taking an
	 * exception will trash SRR0 and SRR1.  Therefore we clear the
	 * MSR:RI bit to indicate this.  If we do take an exception,
	 * we can't return to the point of the exception but we
	 * can restart the exception exit path at the label
	 * exc_exit_restart below.  -- paulus
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
	SYNC
	MTMSRD(r10)		/* clear the RI bit */
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r12,_NIP(r1)
#ifdef CONFIG_PPC_8xx_PERF_EVENT
	mtspr	SPRN_NRI, r0
#endif
	mtspr	SPRN_SRR0,r12
	mtspr	SPRN_SRR1,r9
	REST_4GPRS(9, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	SYNC
	RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
	/*
	 * This is a bit different on 4xx/Book-E because it doesn't have
	 * the RI bit in the MSR.
	 * The TLB miss handler checks if we have interrupted
	 * the exception exit path and restarts it if so
	 * (well maybe one day it will... :).
	 */
	lwz	r11,_LINK(r1)
	mtlr	r11
	lwz	r10,_CCR(r1)
	mtcrf	0xff,r10
	REST_2GPRS(9, r1)
	.globl exc_exit_restart
exc_exit_restart:
	lwz	r11,_NIP(r1)
	lwz	r12,_MSR(r1)
exc_exit_start:
	mtspr	SPRN_SRR0,r11
	mtspr	SPRN_SRR1,r12
	REST_2GPRS(11, r1)
	lwz	r1,GPR1(r1)
	.globl exc_exit_restart_end
exc_exit_restart_end:
	PPC405_ERR77_SYNC
	rfi
	b	.			/* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR						    \
	/* avoid any possible TLB misses here by turning off MSR.DR, we    \
	 * assume the instructions here are mapped by a pinned TLB entry */ \
	li	r10,MSR_IR;						    \
	mtmsr	r10;							    \
	isync;								    \
	tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
	REST_NVGPRS(r1);						\
	lwz	r3,_MSR(r1);						\
	andi.	r3,r3,MSR_PR;						\
	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
	bne	user_exc_return;					\
	lwz	r0,GPR0(r1);						\
	lwz	r2,GPR2(r1);						\
	REST_4GPRS(3, r1);						\
	REST_2GPRS(7, r1);						\
	lwz	r10,_XER(r1);						\
	lwz	r11,_CTR(r1);						\
	mtspr	SPRN_XER,r10;						\
	mtctr	r11;							\
	PPC405_ERR77(0,r1);						\
	stwcx.	r0,0,r1;		/* to clear the reservation */	\
	lwz	r11,_LINK(r1);						\
	mtlr	r11;							\
	lwz	r10,_CCR(r1);						\
	mtcrf	0xff,r10;						\
	PPC_40x_TURN_OFF_MSR_DR;					\
	lwz	r9,_DEAR(r1);						\
	lwz	r10,_ESR(r1);						\
	mtspr	SPRN_DEAR,r9;						\
	mtspr	SPRN_ESR,r10;						\
	lwz	r11,_NIP(r1);						\
	lwz	r12,_MSR(r1);						\
	mtspr	exc_lvl_srr0,r11;					\
	mtspr	exc_lvl_srr1,r12;					\
	lwz	r9,GPR9(r1);						\
	lwz	r12,GPR12(r1);						\
	lwz	r10,GPR10(r1);						\
	lwz	r11,GPR11(r1);						\
	lwz	r1,GPR1(r1);						\
	PPC405_ERR77_SYNC;						\
	exc_lvl_rfi;							\
	b	.;		/* prevent prefetch past exc_lvl_rfi */

#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
	lwz	r9,_##exc_lvl_srr0(r1);					\
	lwz	r10,_##exc_lvl_srr1(r1);				\
	mtspr	SPRN_##exc_lvl_srr0,r9;					\
	mtspr	SPRN_##exc_lvl_srr1,r10;

#if defined(CONFIG_PPC_BOOK3E_MMU)
#ifdef CONFIG_PHYS_64BIT
#define	RESTORE_MAS7							\
	lwz	r11,MAS7(r1);						\
	mtspr	SPRN_MAS7,r11;
#else
#define	RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS						\
	lwz	r9,MAS0(r1);						\
	lwz	r10,MAS1(r1);						\
	lwz	r11,MAS2(r1);						\
	mtspr	SPRN_MAS0,r9;						\
	lwz	r9,MAS3(r1);						\
	mtspr	SPRN_MAS1,r10;						\
	lwz	r10,MAS6(r1);						\
	mtspr	SPRN_MAS2,r11;						\
	mtspr	SPRN_MAS3,r9;						\
	mtspr	SPRN_MAS6,r10;						\
	RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS						\
	lwz	r9,MMUCR(r1);						\
	mtspr	SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

#ifdef CONFIG_40x
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lis	r10,saved_ksp_limit@ha;
	lwz	r10,saved_ksp_limit@l(r10);
	tovirt(r9,r9);
	stw	r10,KSP_LIMIT(r9)
	lis	r9,crit_srr0@ha;
	lwz	r9,crit_srr0@l(r9);
	lis	r10,crit_srr1@ha;
	lwz	r10,crit_srr1@l(r10);
	mtspr	SPRN_SRR0,r9;
	mtspr	SPRN_SRR1,r10;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
	.globl	ret_from_crit_exc
ret_from_crit_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)

	.globl	ret_from_debug_exc
ret_from_debug_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	lwz	r9,THREAD_INFO-THREAD(r9)
	CURRENT_THREAD_INFO(r10, r1)
	lwz	r10,TI_PREEMPT(r10)
	stw	r10,TI_PREEMPT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)

	.globl	ret_from_mcheck_exc
ret_from_mcheck_exc:
	mfspr	r9,SPRN_SPRG_THREAD
	lwz	r10,SAVED_KSP_LIMIT(r1)
	stw	r10,KSP_LIMIT(r9)
	RESTORE_xSRR(SRR0,SRR1);
	RESTORE_xSRR(CSRR0,CSRR1);
	RESTORE_xSRR(DSRR0,DSRR1);
	RESTORE_MMU_REGS;
	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 */
load_dbcr0:
	mfmsr	r10		/* first disable debug exceptions */
	rlwinm	r10,r10,0,~MSR_DE
	mtmsr	r10
	isync
	mfspr	r10,SPRN_DBCR0
	lis	r11,global_dbcr0@ha
	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_CPU(r9)
	slwi	r9,r9,3
	add	r11,r11,r9
#endif
	stw	r10,0(r11)
	mtspr	SPRN_DBCR0,r0
	lwz	r10,4(r11)
	addi	r10,r10,1
	stw	r10,4(r11)
	li	r11,-1
	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
	blr

	.section .bss
	.align	4
global_dbcr0:
	.space	8*NR_CPUS
	.previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:			/* r10 contains MSR_KERNEL here */
	andi.	r0,r9,_TIF_NEED_RESCHED
	beq	do_user_signal

do_resched:			/* r10 contains MSR_KERNEL here */
	/* Note: We don't need to inform lockdep that we are enabling
	 * interrupts here.  As far as it knows, they are already enabled
	 */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	bl	schedule
recheck:
	/* Note: And we don't tell it we are disabling them again
	 * either.  Those disable/enable cycles used to peek at
	 * TI_FLAGS aren't advertised.
	 */
	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
	SYNC
	MTMSRD(r10)		/* disable interrupts */
	CURRENT_THREAD_INFO(r9, r1)
	lwz	r9,TI_FLAGS(r9)
	andi.	r0,r9,_TIF_NEED_RESCHED
	bne-	do_resched
	andi.	r0,r9,_TIF_USER_WORK_MASK
	beq	restore_user
do_user_signal:			/* r10 contains MSR_KERNEL here */
	ori	r10,r10,MSR_EE
	SYNC
	MTMSRD(r10)		/* hard-enable interrupts */
	/* save r13-r31 in the exception frame, if not already done */
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	2f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
2:	addi	r3,r1,STACK_FRAME_OVERHEAD
	mr	r4,r9
	bl	do_notify_resume
	REST_NVGPRS(r1)
	b	recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
	lis	r10,exc_exit_restart_end@ha
	addi	r10,r10,exc_exit_restart_end@l
	cmplw	r12,r10
	bge	3f
	lis	r11,exc_exit_restart@ha
	addi	r11,r11,exc_exit_restart@l
	cmplw	r12,r11
	blt	3f
	lis	r10,ee_restarts@ha
	lwz	r12,ee_restarts@l(r10)
	addi	r12,r12,1
	stw	r12,ee_restarts@l(r10)
	mr	r12,r11		/* restart at exc_exit_restart */
	blr
3:	/* OK, we can't recover, kill this process */
	/* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
	lwz	r3,_TRAP(r1)
	andi.	r0,r3,1
	beq	4f
	SAVE_NVGPRS(r1)
	rlwinm	r3,r3,0,0,30
	stw	r3,_TRAP(r1)
4:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	nonrecoverable_exception
	/* shouldn't return */
	b	4b

	.section .bss
	.align	2
ee_restarts:
	.space	4
	.previous

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
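/*
 * The sequence below therefore saves the current MSR, computes the physical
 * address of the local "1:" return label, loads SRR0/SRR1 with the RTAS
 * entry point and an MSR with IR/DR cleared, and RFIs into RTAS.  RTAS
 * returns to the physical "1:" label, which still runs untranslated, and a
 * second SRR0/SRR1 + RFI restores the original MSR and returns to the caller.
 */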
_GLOBAL(enter_rtas)
	stwu	r1,-INT_FRAME_SIZE(r1)
	mflr	r0
	stw	r0,INT_FRAME_SIZE+4(r1)
	LOAD_REG_ADDR(r4, rtas)
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	tophys(r6,r6)
	tophys(r7,r1)
	lwz	r8,RTASENTRY(r4)
	lwz	r4,RTASBASE(r4)
	mfmsr	r9
	stw	r9,8(r1)
	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
	SYNC			/* disable interrupts so SRR0/1 */
	MTMSRD(r0)		/* don't get trashed */
	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
	mtlr	r6
	mtspr	SPRN_SPRG_RTAS,r7
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI
1:	tophys(r9,r1)
	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	addi	r1,r1,INT_FRAME_SIZE
	li	r0,0
	mtspr	SPRN_SPRG_RTAS,r0
	mtspr	SPRN_SRR0,r8
	mtspr	SPRN_SRR1,r9
	RFI			/* return to caller */

	.globl	machine_check_in_rtas
machine_check_in_rtas:
	twi	31,0,0
	/* XXX load up BATs and panic */

#endif /* CONFIG_PPC_RTAS */