powerpc/idle/6xx: Use r1 with CURRENT_THREAD_INFO()
[linux-2.6-block.git] / arch/powerpc/kernel/entry_32.S
9994a338
PM
1/*
2 * PowerPC version
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 * Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
5 * Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
6 * Adapted for Power Macintosh by Paul Mackerras.
7 * Low-level exception handlers and MMU support
8 * rewritten by Paul Mackerras.
9 * Copyright (C) 1996 Paul Mackerras.
10 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 * This file contains the system call entry code, context switch
13 * code, and exception/interrupt return code for PowerPC.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 *
20 */
21
9994a338 22#include <linux/errno.h>
c3525940 23#include <linux/err.h>
9994a338
PM
24#include <linux/sys.h>
25#include <linux/threads.h>
26#include <asm/reg.h>
27#include <asm/page.h>
28#include <asm/mmu.h>
29#include <asm/cputable.h>
30#include <asm/thread_info.h>
31#include <asm/ppc_asm.h>
32#include <asm/asm-offsets.h>
33#include <asm/unistd.h>
46f52210 34#include <asm/ptrace.h>
9445aa1a 35#include <asm/export.h>
36a7eeaf 36#include <asm/asm-405.h>
2c86cd18 37#include <asm/feature-fixups.h>
c28218d4 38#include <asm/barrier.h>
9994a338 39
9994a338
PM
40/*
 41 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
42 */
43#if MSR_KERNEL >= 0x10000
44#define LOAD_MSR_KERNEL(r, x) lis r,(x)@h; ori r,r,(x)@l
45#else
46#define LOAD_MSR_KERNEL(r, x) li r,(x)
47#endif
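/*
 * Note: li can only materialize a 16-bit signed immediate, so an MSR
 * value with bits set above the low halfword (as on 4xx/Book-E, where
 * MSR_KERNEL includes MSR_CE) has to be built with lis/ori instead.
 */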
48
0eb0d2e7
CL
49/*
 50 * Align to 4k in order to ensure that all functions modifying srr0/srr1
 51 * fit into one page, so that we cannot hit a TLB miss between the
 52 * modification of srr0/srr1 and the associated rfi.
53 */
54 .align 12
55
9994a338 56#ifdef CONFIG_BOOKE
9994a338
PM
57 .globl mcheck_transfer_to_handler
58mcheck_transfer_to_handler:
fca622c5
KG
59 mfspr r0,SPRN_DSRR0
60 stw r0,_DSRR0(r11)
61 mfspr r0,SPRN_DSRR1
62 stw r0,_DSRR1(r11)
63 /* fall through */
9994a338
PM
64
65 .globl debug_transfer_to_handler
66debug_transfer_to_handler:
fca622c5
KG
67 mfspr r0,SPRN_CSRR0
68 stw r0,_CSRR0(r11)
69 mfspr r0,SPRN_CSRR1
70 stw r0,_CSRR1(r11)
71 /* fall through */
9994a338
PM
72
73 .globl crit_transfer_to_handler
74crit_transfer_to_handler:
70fe3af8 75#ifdef CONFIG_PPC_BOOK3E_MMU
fca622c5
KG
76 mfspr r0,SPRN_MAS0
77 stw r0,MAS0(r11)
78 mfspr r0,SPRN_MAS1
79 stw r0,MAS1(r11)
80 mfspr r0,SPRN_MAS2
81 stw r0,MAS2(r11)
82 mfspr r0,SPRN_MAS3
83 stw r0,MAS3(r11)
84 mfspr r0,SPRN_MAS6
85 stw r0,MAS6(r11)
86#ifdef CONFIG_PHYS_64BIT
87 mfspr r0,SPRN_MAS7
88 stw r0,MAS7(r11)
89#endif /* CONFIG_PHYS_64BIT */
70fe3af8 90#endif /* CONFIG_PPC_BOOK3E_MMU */
fca622c5
KG
91#ifdef CONFIG_44x
92 mfspr r0,SPRN_MMUCR
93 stw r0,MMUCR(r11)
94#endif
95 mfspr r0,SPRN_SRR0
96 stw r0,_SRR0(r11)
97 mfspr r0,SPRN_SRR1
98 stw r0,_SRR1(r11)
99
1f8b0bc8
SY
 100 /* save the current stack limit and set a new one based on
 101 * the current stack, so that the thread_info struct at its
 102 * base stays protected
103 */
ee43eb78 104 mfspr r8,SPRN_SPRG_THREAD
fca622c5
KG
105 lwz r0,KSP_LIMIT(r8)
106 stw r0,SAVED_KSP_LIMIT(r11)
1f8b0bc8 107 rlwimi r0,r1,0,0,(31-THREAD_SHIFT)
fca622c5 108 stw r0,KSP_LIMIT(r8)
9994a338
PM
109 /* fall through */
110#endif
111
112#ifdef CONFIG_40x
113 .globl crit_transfer_to_handler
114crit_transfer_to_handler:
115 lwz r0,crit_r10@l(0)
116 stw r0,GPR10(r11)
117 lwz r0,crit_r11@l(0)
118 stw r0,GPR11(r11)
fca622c5
KG
119 mfspr r0,SPRN_SRR0
120 stw r0,crit_srr0@l(0)
121 mfspr r0,SPRN_SRR1
122 stw r0,crit_srr1@l(0)
123
1f8b0bc8
SY
 124 /* save the current stack limit and set a new one based on
 125 * the current stack, so that the thread_info struct at its
 126 * base stays protected
127 */
ee43eb78 128 mfspr r8,SPRN_SPRG_THREAD
fca622c5
KG
129 lwz r0,KSP_LIMIT(r8)
130 stw r0,saved_ksp_limit@l(0)
1f8b0bc8 131 rlwimi r0,r1,0,0,(31-THREAD_SHIFT)
fca622c5 132 stw r0,KSP_LIMIT(r8)
9994a338
PM
133 /* fall through */
134#endif
135
136/*
137 * This code finishes saving the registers to the exception frame
138 * and jumps to the appropriate handler for the exception, turning
139 * on address translation.
140 * Note that we rely on the caller having set cr0.eq iff the exception
141 * occurred in kernel mode (i.e. MSR:PR = 0).
142 */
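/*
 * Register conventions assumed by the code below: r11 points to the
 * exception frame (pt_regs), r12 and r9 hold the interrupted NIP and
 * MSR, r10 holds the MSR value the handler should run with, and LR
 * points to a two-word descriptor giving the handler's virtual address
 * and the address to return to when it is done.
 */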
143 .globl transfer_to_handler_full
144transfer_to_handler_full:
145 SAVE_NVGPRS(r11)
146 /* fall through */
147
148 .globl transfer_to_handler
149transfer_to_handler:
150 stw r2,GPR2(r11)
151 stw r12,_NIP(r11)
152 stw r9,_MSR(r11)
153 andi. r2,r9,MSR_PR
154 mfctr r12
155 mfspr r2,SPRN_XER
156 stw r12,_CTR(r11)
157 stw r2,_XER(r11)
ee43eb78 158 mfspr r12,SPRN_SPRG_THREAD
9994a338
PM
159 addi r2,r12,-THREAD
160 tovirt(r2,r2) /* set r2 to current */
161 beq 2f /* if from user, fix up THREAD.regs */
162 addi r11,r1,STACK_FRAME_OVERHEAD
163 stw r11,PT_REGS(r12)
164#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
165 /* Check to see if the dbcr0 register is set up to debug. Use the
4eaddb4d 166 internal debug mode bit to do this. */
9994a338 167 lwz r12,THREAD_DBCR0(r12)
2325f0a0 168 andis. r12,r12,DBCR0_IDM@h
6b9166f0
CL
169#endif
170#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
171 CURRENT_THREAD_INFO(r9, r1)
172 tophys(r9, r9)
173 ACCOUNT_CPU_USER_ENTRY(r9, r11, r12)
174#endif
175#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
9994a338
PM
176 beq+ 3f
177 /* From user and task is ptraced - load up global dbcr0 */
178 li r12,-1 /* clear all pending debug events */
179 mtspr SPRN_DBSR,r12
180 lis r11,global_dbcr0@ha
181 tophys(r11,r11)
182 addi r11,r11,global_dbcr0@l
4eaddb4d 183#ifdef CONFIG_SMP
9778b696 184 CURRENT_THREAD_INFO(r9, r1)
4eaddb4d
KG
185 lwz r9,TI_CPU(r9)
186 slwi r9,r9,3
187 add r11,r11,r9
188#endif
9994a338
PM
189 lwz r12,0(r11)
190 mtspr SPRN_DBCR0,r12
191 lwz r12,4(r11)
192 addi r12,r12,-1
193 stw r12,4(r11)
194#endif
c223c903 195
9994a338 196 b 3f
f39224a8 197
9994a338
PM
1982: /* if from kernel, check interrupted DOZE/NAP mode and
199 * check for stack overflow
200 */
85218827
KG
201 lwz r9,KSP_LIMIT(r12)
202 cmplw r1,r9 /* if r1 <= ksp_limit */
f39224a8
PM
203 ble- stack_ovf /* then the kernel stack overflowed */
2045:
d7cceda9 205#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
9778b696 206 CURRENT_THREAD_INFO(r9, r1)
f39224a8
PM
207 tophys(r9,r9) /* check local flags */
208 lwz r12,TI_LOCAL_FLAGS(r9)
209 mtcrf 0x01,r12
210 bt- 31-TLF_NAPPING,4f
a560643e 211 bt- 31-TLF_SLEEPING,7f
d7cceda9 212#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */
9994a338
PM
213 .globl transfer_to_handler_cont
214transfer_to_handler_cont:
9994a338
PM
2153:
216 mflr r9
217 lwz r11,0(r9) /* virtual address of handler */
218 lwz r9,4(r9) /* where to go when done */
cd99ddbe 219#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
75b82472
CL
220 mtspr SPRN_NRI, r0
221#endif
5d38902c
BH
222#ifdef CONFIG_TRACE_IRQFLAGS
223 lis r12,reenable_mmu@h
224 ori r12,r12,reenable_mmu@l
225 mtspr SPRN_SRR0,r12
226 mtspr SPRN_SRR1,r10
227 SYNC
228 RFI
229reenable_mmu: /* re-enable mmu so we can */
230 mfmsr r10
231 lwz r12,_MSR(r1)
232 xor r10,r10,r12
233 andi. r10,r10,MSR_EE /* Did EE change? */
234 beq 1f
235
2cd76629
KH
236 /*
 237 * trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
 238 * If we came from user mode there is only one stack frame on the
 239 * stack, and accessing CALLER_ADDR1 will cause an oops. So we need to
 240 * create a dummy stack frame to make trace_hardirqs_off happy.
08f1ec8a
BH
241 *
242 * This is handy because we also need to save a bunch of GPRs,
243 * r3 can be different from GPR3(r1) at this point, r9 and r11
 244 * contain the old MSR and handler address respectively,
245 * r4 & r5 can contain page fault arguments that need to be passed
246 * along as well. r12, CCR, CTR, XER etc... are left clobbered as
247 * they aren't useful past this point (aren't syscall arguments),
248 * the rest is restored from the exception frame.
2cd76629 249 */
08f1ec8a
BH
250 stwu r1,-32(r1)
251 stw r9,8(r1)
252 stw r11,12(r1)
253 stw r3,16(r1)
254 stw r4,20(r1)
255 stw r5,24(r1)
2cd76629 256 bl trace_hardirqs_off
08f1ec8a
BH
257 lwz r5,24(r1)
258 lwz r4,20(r1)
259 lwz r3,16(r1)
260 lwz r11,12(r1)
261 lwz r9,8(r1)
262 addi r1,r1,32
5d38902c 263 lwz r0,GPR0(r1)
5d38902c
BH
264 lwz r6,GPR6(r1)
265 lwz r7,GPR7(r1)
266 lwz r8,GPR8(r1)
5d38902c
BH
2671: mtctr r11
268 mtlr r9
269 bctr /* jump to handler */
270#else /* CONFIG_TRACE_IRQFLAGS */
9994a338
PM
271 mtspr SPRN_SRR0,r11
272 mtspr SPRN_SRR1,r10
273 mtlr r9
274 SYNC
275 RFI /* jump to handler, enable MMU */
5d38902c 276#endif /* CONFIG_TRACE_IRQFLAGS */
9994a338 277
d7cceda9 278#if defined (CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
f39224a8
PM
2794: rlwinm r12,r12,0,~_TLF_NAPPING
280 stw r12,TI_LOCAL_FLAGS(r9)
fc4033b2 281 b power_save_ppc32_restore
a560643e
PM
282
2837: rlwinm r12,r12,0,~_TLF_SLEEPING
284 stw r12,TI_LOCAL_FLAGS(r9)
285 lwz r9,_MSR(r11) /* if sleeping, clear MSR.EE */
286 rlwinm r9,r9,0,~MSR_EE
287 lwz r12,_LINK(r11) /* and return to address in LR */
288 b fast_exception_return
a0652fc9
PM
289#endif
290
9994a338
PM
291/*
292 * On kernel stack overflow, load up an initial stack pointer
293 * and call StackOverflow(regs), which should not return.
294 */
295stack_ovf:
296 /* sometimes we use a statically-allocated stack, which is OK. */
f39224a8
PM
297 lis r12,_end@h
298 ori r12,r12,_end@l
299 cmplw r1,r12
300 ble 5b /* r1 <= &_end is OK */
9994a338
PM
301 SAVE_NVGPRS(r11)
302 addi r3,r1,STACK_FRAME_OVERHEAD
303 lis r1,init_thread_union@ha
304 addi r1,r1,init_thread_union@l
305 addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
306 lis r9,StackOverflow@ha
307 addi r9,r9,StackOverflow@l
308 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
cd99ddbe 309#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
75b82472
CL
310 mtspr SPRN_NRI, r0
311#endif
9994a338
PM
312 mtspr SPRN_SRR0,r9
313 mtspr SPRN_SRR1,r10
314 SYNC
315 RFI
316
317/*
318 * Handle a system call.
319 */
320 .stabs "arch/powerpc/kernel/",N_SO,0,0,0f
321 .stabs "entry_32.S",N_SO,0,0,0f
3220:
323
324_GLOBAL(DoSyscall)
9994a338
PM
325 stw r3,ORIG_GPR3(r1)
326 li r12,0
327 stw r12,RESULT(r1)
328 lwz r11,_CCR(r1) /* Clear SO bit in CR */
329 rlwinm r11,r11,0,4,2
330 stw r11,_CCR(r1)
5d38902c
BH
331#ifdef CONFIG_TRACE_IRQFLAGS
 332 /* Returning from syscalls can (and generally will) hard enable
333 * interrupts. You aren't supposed to call a syscall with
334 * interrupts disabled in the first place. However, to ensure
335 * that we get it right vs. lockdep if it happens, we force
336 * that hard enable here with appropriate tracing if we see
337 * that we have been called with interrupts off
338 */
339 mfmsr r11
340 andi. r12,r11,MSR_EE
341 bne+ 1f
342 /* We came in with interrupts disabled, we enable them now */
343 bl trace_hardirqs_on
344 mfmsr r11
345 lwz r0,GPR0(r1)
346 lwz r3,GPR3(r1)
347 lwz r4,GPR4(r1)
348 ori r11,r11,MSR_EE
349 lwz r5,GPR5(r1)
350 lwz r6,GPR6(r1)
351 lwz r7,GPR7(r1)
352 lwz r8,GPR8(r1)
353 mtmsr r11
3541:
355#endif /* CONFIG_TRACE_IRQFLAGS */
9778b696 356 CURRENT_THREAD_INFO(r10, r1)
9994a338 357 lwz r11,TI_FLAGS(r10)
10ea8343 358 andi. r11,r11,_TIF_SYSCALL_DOTRACE
9994a338
PM
359 bne- syscall_dotrace
360syscall_dotrace_cont:
361 cmplwi 0,r0,NR_syscalls
362 lis r10,sys_call_table@h
363 ori r10,r10,sys_call_table@l
364 slwi r0,r0,2
365 bge- 66f
c28218d4
DC
366
367 barrier_nospec_asm
368 /*
369 * Prevent the load of the handler below (based on the user-passed
370 * system call number) being speculatively executed until the test
 371 * against NR_syscalls and the branch to 66f above have
372 * committed.
373 */
374
9994a338
PM
375 lwzx r10,r10,r0 /* Fetch system call handler [ptr] */
376 mtlr r10
377 addi r9,r1,STACK_FRAME_OVERHEAD
378 PPC440EP_ERR42
379 blrl /* Call handler */
380 .globl ret_from_syscall
381ret_from_syscall:
6f37be4b
BF
382#ifdef CONFIG_DEBUG_RSEQ
383 /* Check whether the syscall is issued inside a restartable sequence */
384 stw r3,GPR3(r1)
385 addi r3,r1,STACK_FRAME_OVERHEAD
386 bl rseq_syscall
387 lwz r3,GPR3(r1)
388#endif
9994a338 389 mr r6,r3
9778b696 390 CURRENT_THREAD_INFO(r12, r1)
9994a338 391 /* disable interrupts so current_thread_info()->flags can't change */
401d1f02 392 LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
5d38902c 393 /* Note: We don't bother telling lockdep about it */
9994a338
PM
394 SYNC
395 MTMSRD(r10)
396 lwz r9,TI_FLAGS(r12)
c3525940 397 li r8,-MAX_ERRNO
10ea8343 398 andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
9994a338 399 bne- syscall_exit_work
401d1f02
DW
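	/* Syscall return convention: a value in [-MAX_ERRNO, -1] denotes an
	 * error; negate it into a positive errno and set the SO bit in CR0
	 * so userspace can tell failure from a large successful result.
	 */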
400 cmplw 0,r3,r8
401 blt+ syscall_exit_cont
402 lwz r11,_CCR(r1) /* Load CR */
403 neg r3,r3
404 oris r11,r11,0x1000 /* Set SO bit in CR */
405 stw r11,_CCR(r1)
9994a338 406syscall_exit_cont:
5d38902c
BH
407 lwz r8,_MSR(r1)
408#ifdef CONFIG_TRACE_IRQFLAGS
409 /* If we are going to return from the syscall with interrupts
 410 * off, we trace that here. It shouldn't happen, but we
 411 * want to catch the bugger if it does, right?
412 */
413 andi. r10,r8,MSR_EE
414 bne+ 1f
415 stw r3,GPR3(r1)
416 bl trace_hardirqs_off
417 lwz r3,GPR3(r1)
4181:
419#endif /* CONFIG_TRACE_IRQFLAGS */
9994a338 420#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
4eaddb4d
KG
421 /* If the process has its own DBCR0 value, load it up. The internal
422 debug mode bit tells us that dbcr0 should be loaded. */
9994a338 423 lwz r0,THREAD+THREAD_DBCR0(r2)
2325f0a0 424 andis. r10,r0,DBCR0_IDM@h
9994a338
PM
425 bnel- load_dbcr0
426#endif
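/*
 * On 44x, check whether a deferred instruction-cache invalidate was
 * requested (icache_44x_need_flush); if so, branch to 2: below to run
 * iccci and clear the flag before returning to user mode.
 */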
b98ac05d 427#ifdef CONFIG_44x
e7f75ad0 428BEGIN_MMU_FTR_SECTION
b98ac05d
BH
429 lis r4,icache_44x_need_flush@ha
430 lwz r5,icache_44x_need_flush@l(r4)
431 cmplwi cr0,r5,0
432 bne- 2f
4331:
e7f75ad0 434END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
b98ac05d 435#endif /* CONFIG_44x */
b64f87c1
BB
436BEGIN_FTR_SECTION
437 lwarx r7,0,r1
438END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
9994a338 439 stwcx. r0,0,r1 /* to clear the reservation */
c223c903
CL
440#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
441 andi. r4,r8,MSR_PR
442 beq 3f
443 CURRENT_THREAD_INFO(r4, r1)
444 ACCOUNT_CPU_USER_EXIT(r4, r5, r7)
4453:
446#endif
9994a338
PM
447 lwz r4,_LINK(r1)
448 lwz r5,_CCR(r1)
449 mtlr r4
450 mtcr r5
451 lwz r7,_NIP(r1)
9994a338
PM
452 lwz r2,GPR2(r1)
453 lwz r1,GPR1(r1)
cd99ddbe 454#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
75b82472
CL
455 mtspr SPRN_NRI, r0
456#endif
9994a338
PM
457 mtspr SPRN_SRR0,r7
458 mtspr SPRN_SRR1,r8
459 SYNC
460 RFI
b98ac05d
BH
461#ifdef CONFIG_44x
4622: li r7,0
463 iccci r0,r0
464 stw r7,icache_44x_need_flush@l(r4)
465 b 1b
466#endif /* CONFIG_44x */
9994a338
PM
467
46866: li r3,-ENOSYS
469 b ret_from_syscall
470
471 .globl ret_from_fork
472ret_from_fork:
473 REST_NVGPRS(r1)
474 bl schedule_tail
475 li r3,0
476 b ret_from_syscall
477
58254e10
AV
478 .globl ret_from_kernel_thread
479ret_from_kernel_thread:
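	/* For a kernel thread, copy_thread() stashes the thread function in
	 * r14 and its argument in r15; REST_NVGPRS below brings them back,
	 * then we call the function and, should it return, exit through the
	 * syscall return path with r3 = 0.
	 */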
480 REST_NVGPRS(r1)
481 bl schedule_tail
482 mtlr r14
483 mr r3,r15
484 PPC440EP_ERR42
485 blrl
486 li r3,0
be6abfa7 487 b ret_from_syscall
9994a338
PM
488
489/* Traced system call support */
490syscall_dotrace:
491 SAVE_NVGPRS(r1)
492 li r0,0xc00
d73e0c99 493 stw r0,_TRAP(r1)
9994a338
PM
494 addi r3,r1,STACK_FRAME_OVERHEAD
495 bl do_syscall_trace_enter
4f72c427
RM
496 /*
497 * Restore argument registers possibly just changed.
498 * We use the return value of do_syscall_trace_enter
499 * for call number to look up in the table (r0).
500 */
501 mr r0,r3
9994a338
PM
502 lwz r3,GPR3(r1)
503 lwz r4,GPR4(r1)
504 lwz r5,GPR5(r1)
505 lwz r6,GPR6(r1)
506 lwz r7,GPR7(r1)
507 lwz r8,GPR8(r1)
508 REST_NVGPRS(r1)
d3837414
ME
509
510 cmplwi r0,NR_syscalls
511 /* Return code is already in r3 thanks to do_syscall_trace_enter() */
512 bge- ret_from_syscall
9994a338
PM
513 b syscall_dotrace_cont
514
515syscall_exit_work:
401d1f02 516 andi. r0,r9,_TIF_RESTOREALL
1bd79336
PM
517 beq+ 0f
518 REST_NVGPRS(r1)
519 b 2f
5200: cmplw 0,r3,r8
401d1f02
DW
521 blt+ 1f
522 andi. r0,r9,_TIF_NOERROR
523 bne- 1f
524 lwz r11,_CCR(r1) /* Load CR */
525 neg r3,r3
526 oris r11,r11,0x1000 /* Set SO bit in CR */
527 stw r11,_CCR(r1)
528
5291: stw r6,RESULT(r1) /* Save result */
9994a338 530 stw r3,GPR3(r1) /* Update return value */
401d1f02
DW
5312: andi. r0,r9,(_TIF_PERSYSCALL_MASK)
532 beq 4f
533
1bd79336 534 /* Clear per-syscall TIF flags if any are set. */
401d1f02
DW
535
536 li r11,_TIF_PERSYSCALL_MASK
537 addi r12,r12,TI_FLAGS
5383: lwarx r8,0,r12
539 andc r8,r8,r11
540#ifdef CONFIG_IBM405_ERR77
541 dcbt 0,r12
542#endif
543 stwcx. r8,0,r12
544 bne- 3b
545 subi r12,r12,TI_FLAGS
546
5474: /* Anything which requires enabling interrupts? */
10ea8343 548 andi. r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
1bd79336
PM
549 beq ret_from_except
550
5d38902c
BH
551 /* Re-enable interrupts. There is no need to trace that with
552 * lockdep as we are supposed to have IRQs on at this point
553 */
1bd79336
PM
554 ori r10,r10,MSR_EE
555 SYNC
556 MTMSRD(r10)
401d1f02
DW
557
558 /* Save NVGPRS if they're not saved already */
d73e0c99 559 lwz r4,_TRAP(r1)
9994a338 560 andi. r4,r4,1
401d1f02 561 beq 5f
9994a338
PM
562 SAVE_NVGPRS(r1)
563 li r4,0xc00
d73e0c99 564 stw r4,_TRAP(r1)
1bd79336 5655:
9994a338
PM
566 addi r3,r1,STACK_FRAME_OVERHEAD
567 bl do_syscall_trace_leave
1bd79336 568 b ret_from_except_full
9994a338 569
9994a338 570/*
401d1f02
DW
571 * The fork/clone functions need to copy the full register set into
572 * the child process. Therefore we need to save all the nonvolatile
573 * registers (r13 - r31) before calling the C code.
9994a338 574 */
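/*
 * The low bit of the trap word (_TRAP(r1)) records whether only the
 * volatile registers are present in the frame; clearing it below marks
 * the frame as holding the full register set.
 */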
9994a338
PM
575 .globl ppc_fork
576ppc_fork:
577 SAVE_NVGPRS(r1)
d73e0c99 578 lwz r0,_TRAP(r1)
9994a338 579 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
d73e0c99 580 stw r0,_TRAP(r1) /* register set saved */
9994a338
PM
581 b sys_fork
582
583 .globl ppc_vfork
584ppc_vfork:
585 SAVE_NVGPRS(r1)
d73e0c99 586 lwz r0,_TRAP(r1)
9994a338 587 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
d73e0c99 588 stw r0,_TRAP(r1) /* register set saved */
9994a338
PM
589 b sys_vfork
590
591 .globl ppc_clone
592ppc_clone:
593 SAVE_NVGPRS(r1)
d73e0c99 594 lwz r0,_TRAP(r1)
9994a338 595 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
d73e0c99 596 stw r0,_TRAP(r1) /* register set saved */
9994a338
PM
597 b sys_clone
598
1bd79336
PM
599 .globl ppc_swapcontext
600ppc_swapcontext:
601 SAVE_NVGPRS(r1)
602 lwz r0,_TRAP(r1)
603 rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */
604 stw r0,_TRAP(r1) /* register set saved */
605 b sys_swapcontext
606
9994a338
PM
607/*
608 * Top-level page fault handling.
609 * This is in assembler because if do_page_fault tells us that
610 * it is a bad kernel page fault, we want to save the non-volatile
611 * registers before calling bad_page_fault.
612 */
613 .globl handle_page_fault
614handle_page_fault:
615 stw r4,_DAR(r1)
616 addi r3,r1,STACK_FRAME_OVERHEAD
d7cceda9 617#ifdef CONFIG_PPC_BOOK3S_32
64d0a506 618 andis. r0,r5,DSISR_DABRMATCH@h
d300627c 619 bne- handle_dabr_fault
d300627c 620#endif
64d0a506 621 bl do_page_fault
9994a338
PM
622 cmpwi r3,0
623 beq+ ret_from_except
624 SAVE_NVGPRS(r1)
d73e0c99 625 lwz r0,_TRAP(r1)
9994a338 626 clrrwi r0,r0,1
d73e0c99 627 stw r0,_TRAP(r1)
9994a338
PM
628 mr r5,r3
629 addi r3,r1,STACK_FRAME_OVERHEAD
630 lwz r4,_DAR(r1)
631 bl bad_page_fault
632 b ret_from_except_full
633
d7cceda9 634#ifdef CONFIG_PPC_BOOK3S_32
d300627c
BH
635 /* We have a data breakpoint exception - handle it */
636handle_dabr_fault:
637 SAVE_NVGPRS(r1)
638 lwz r0,_TRAP(r1)
639 clrrwi r0,r0,1
640 stw r0,_TRAP(r1)
641 bl do_break
642 b ret_from_except_full
643#endif
644
9994a338
PM
645/*
646 * This routine switches between two different tasks. The process
647 * state of one is saved on its kernel stack. Then the state
648 * of the other is restored from its kernel stack. The memory
649 * management hardware is updated to the second process's state.
650 * Finally, we can return to the second process.
651 * On entry, r3 points to the THREAD for the current task, r4
652 * points to the THREAD for the new task.
653 *
654 * This routine is always called with interrupts disabled.
655 *
656 * Note: there are two ways to get to the "going out" portion
657 * of this code; either by coming in via the entry (_switch)
658 * or via "fork" which must set up an environment equivalent
 659 * to the "_switch" path. If you change this, you'll have to
660 * change the fork code also.
661 *
662 * The code which creates the new task context is in 'copy_thread'
 663 * in arch/powerpc/kernel/process.c
664 */
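/*
 * Conventions relied on below: r2 is kept pointing at 'current' (the
 * task_struct) and SPRN_SPRG_THREAD holds the physical address of the
 * current task's thread_struct; both are switched to the new task here,
 * and the old 'current' is returned in r3 as 'last'.
 */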
665_GLOBAL(_switch)
666 stwu r1,-INT_FRAME_SIZE(r1)
667 mflr r0
668 stw r0,INT_FRAME_SIZE+4(r1)
669 /* r3-r12 are caller saved -- Cort */
670 SAVE_NVGPRS(r1)
671 stw r0,_NIP(r1) /* Return to switch caller */
672 mfmsr r11
673 li r0,MSR_FP /* Disable floating-point */
674#ifdef CONFIG_ALTIVEC
675BEGIN_FTR_SECTION
676 oris r0,r0,MSR_VEC@h /* Disable altivec */
677 mfspr r12,SPRN_VRSAVE /* save vrsave register value */
678 stw r12,THREAD+THREAD_VRSAVE(r2)
679END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
680#endif /* CONFIG_ALTIVEC */
681#ifdef CONFIG_SPE
5e14d21e 682BEGIN_FTR_SECTION
9994a338
PM
683 oris r0,r0,MSR_SPE@h /* Disable SPE */
684 mfspr r12,SPRN_SPEFSCR /* save spefscr register value */
685 stw r12,THREAD+THREAD_SPEFSCR(r2)
5e14d21e 686END_FTR_SECTION_IFSET(CPU_FTR_SPE)
9994a338
PM
687#endif /* CONFIG_SPE */
688 and. r0,r0,r11 /* FP or altivec or SPE enabled? */
689 beq+ 1f
690 andc r11,r11,r0
691 MTMSRD(r11)
692 isync
6931: stw r11,_MSR(r1)
694 mfcr r10
695 stw r10,_CCR(r1)
696 stw r1,KSP(r3) /* Set old stack pointer */
697
698#ifdef CONFIG_SMP
699 /* We need a sync somewhere here to make sure that if the
700 * previous task gets rescheduled on another CPU, it sees all
701 * stores it has performed on this one.
702 */
703 sync
704#endif /* CONFIG_SMP */
705
706 tophys(r0,r4)
ee43eb78 707 mtspr SPRN_SPRG_THREAD,r0 /* Update current THREAD phys addr */
9994a338
PM
708 lwz r1,KSP(r4) /* Load new stack pointer */
709
710 /* save the old current 'last' for return value */
711 mr r3,r2
712 addi r2,r4,-THREAD /* Update current */
713
714#ifdef CONFIG_ALTIVEC
715BEGIN_FTR_SECTION
716 lwz r0,THREAD+THREAD_VRSAVE(r2)
717 mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */
718END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
719#endif /* CONFIG_ALTIVEC */
720#ifdef CONFIG_SPE
5e14d21e 721BEGIN_FTR_SECTION
9994a338
PM
722 lwz r0,THREAD+THREAD_SPEFSCR(r2)
723 mtspr SPRN_SPEFSCR,r0 /* restore SPEFSCR reg */
5e14d21e 724END_FTR_SECTION_IFSET(CPU_FTR_SPE)
9994a338 725#endif /* CONFIG_SPE */
f2574030 726
9994a338
PM
727 lwz r0,_CCR(r1)
728 mtcrf 0xFF,r0
729 /* r3-r12 are destroyed -- Cort */
730 REST_NVGPRS(r1)
731
732 lwz r4,_NIP(r1) /* Return to _switch caller in new task */
733 mtlr r4
734 addi r1,r1,INT_FRAME_SIZE
735 blr
736
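/*
 * fast_exception_return: lightweight exit used by low-level handlers
 * that can return without going through ret_from_except. It expects
 * r11 to point at the exception frame and r9/r12 to hold the MSR and
 * NIP to return with, as saved by the exception prologue.
 */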
737 .globl fast_exception_return
738fast_exception_return:
739#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
740 andi. r10,r9,MSR_RI /* check for recoverable interrupt */
741 beq 1f /* if not, we've got problems */
742#endif
743
7442: REST_4GPRS(3, r11)
745 lwz r10,_CCR(r11)
746 REST_GPR(1, r11)
747 mtcr r10
748 lwz r10,_LINK(r11)
749 mtlr r10
750 REST_GPR(10, r11)
cd99ddbe 751#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
75b82472
CL
752 mtspr SPRN_NRI, r0
753#endif
9994a338
PM
754 mtspr SPRN_SRR1,r9
755 mtspr SPRN_SRR0,r12
756 REST_GPR(9, r11)
757 REST_GPR(12, r11)
758 lwz r11,GPR11(r11)
759 SYNC
760 RFI
761
762#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
763/* check if the exception happened in a restartable section */
7641: lis r3,exc_exit_restart_end@ha
765 addi r3,r3,exc_exit_restart_end@l
766 cmplw r12,r3
767 bge 3f
768 lis r4,exc_exit_restart@ha
769 addi r4,r4,exc_exit_restart@l
770 cmplw r12,r4
771 blt 3f
772 lis r3,fee_restarts@ha
773 tophys(r3,r3)
774 lwz r5,fee_restarts@l(r3)
775 addi r5,r5,1
776 stw r5,fee_restarts@l(r3)
777 mr r12,r4 /* restart at exc_exit_restart */
778 b 2b
779
991eb43a
KG
780 .section .bss
781 .align 2
782fee_restarts:
783 .space 4
784 .previous
9994a338
PM
785
786/* aargh, a nonrecoverable interrupt, panic */
787/* aargh, we don't know which trap this is */
788/* but the 601 doesn't implement the RI bit, so assume it's OK */
7893:
790BEGIN_FTR_SECTION
791 b 2b
792END_FTR_SECTION_IFSET(CPU_FTR_601)
793 li r10,-1
d73e0c99 794 stw r10,_TRAP(r11)
9994a338
PM
795 addi r3,r1,STACK_FRAME_OVERHEAD
796 lis r10,MSR_KERNEL@h
797 ori r10,r10,MSR_KERNEL@l
798 bl transfer_to_handler_full
51423a9c 799 .long unrecoverable_exception
9994a338
PM
800 .long ret_from_except
801#endif
802
9994a338
PM
803 .globl ret_from_except_full
804ret_from_except_full:
805 REST_NVGPRS(r1)
806 /* fall through */
807
808 .globl ret_from_except
809ret_from_except:
810 /* Hard-disable interrupts so that current_thread_info()->flags
811 * can't change between when we test it and when we return
812 * from the interrupt. */
5d38902c 813 /* Note: We don't bother telling lockdep about it */
9994a338
PM
814 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
815 SYNC /* Some chip revs have problems here... */
816 MTMSRD(r10) /* disable interrupts */
817
818 lwz r3,_MSR(r1) /* Returning to user mode? */
819 andi. r0,r3,MSR_PR
820 beq resume_kernel
821
822user_exc_return: /* r10 contains MSR_KERNEL here */
823 /* Check current_thread_info()->flags */
9778b696 824 CURRENT_THREAD_INFO(r9, r1)
9994a338 825 lwz r9,TI_FLAGS(r9)
7a10174e 826 andi. r0,r9,_TIF_USER_WORK_MASK
9994a338
PM
827 bne do_work
828
829restore_user:
830#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
4eaddb4d
KG
831 /* Check whether this process has its own DBCR0 value. The internal
832 debug mode bit tells us that dbcr0 should be loaded. */
9994a338 833 lwz r0,THREAD+THREAD_DBCR0(r2)
2325f0a0 834 andis. r10,r0,DBCR0_IDM@h
9994a338
PM
835 bnel- load_dbcr0
836#endif
c223c903
CL
837#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
838 CURRENT_THREAD_INFO(r9, r1)
839 ACCOUNT_CPU_USER_EXIT(r9, r10, r11)
840#endif
9994a338 841
9994a338
PM
842 b restore
843
844/* N.B. the only way to get here is from the beq following ret_from_except. */
845resume_kernel:
a9c4e541 846 /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
9778b696 847 CURRENT_THREAD_INFO(r9, r1)
a9c4e541 848 lwz r8,TI_FLAGS(r9)
f7b33677 849 andis. r0,r8,_TIF_EMULATE_STACK_STORE@h
a9c4e541
TC
850 beq+ 1f
851
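	/* If an update of r1 by an emulated stwu was deferred (see
	 * _TIF_EMULATE_STACK_STORE, e.g. for a kprobed function prologue),
	 * complete it here: copy the exception frame down to where the stwu
	 * would have placed r1, do the store, then clear the TIF flag.
	 */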
852 addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
853
854 lwz r3,GPR1(r1)
855 subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
856 mr r4,r1 /* src: current exception frame */
857 mr r1,r3 /* Reroute the trampoline frame to r1 */
858
859 /* Copy from the original to the trampoline. */
860 li r5,INT_FRAME_SIZE/4 /* size: INT_FRAME_SIZE */
861 li r6,0 /* start offset: 0 */
862 mtctr r5
8632: lwzx r0,r6,r4
864 stwx r0,r6,r3
865 addi r6,r6,4
866 bdnz 2b
867
868 /* Do real store operation to complete stwu */
869 lwz r5,GPR1(r1)
870 stw r8,0(r5)
871
872 /* Clear _TIF_EMULATE_STACK_STORE flag */
873 lis r11,_TIF_EMULATE_STACK_STORE@h
874 addi r5,r9,TI_FLAGS
8750: lwarx r8,0,r5
876 andc r8,r8,r11
877#ifdef CONFIG_IBM405_ERR77
878 dcbt 0,r5
879#endif
880 stwcx. r8,0,r5
881 bne- 0b
8821:
883
884#ifdef CONFIG_PREEMPT
885 /* check current_thread_info->preempt_count */
9994a338
PM
886 lwz r0,TI_PREEMPT(r9)
887 cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
888 bne restore
a9c4e541 889 andi. r8,r8,_TIF_NEED_RESCHED
9994a338 890 beq+ restore
a9c4e541 891 lwz r3,_MSR(r1)
9994a338
PM
892 andi. r0,r3,MSR_EE /* interrupts off? */
893 beq restore /* don't schedule if so */
5d38902c
BH
894#ifdef CONFIG_TRACE_IRQFLAGS
895 /* Lockdep thinks irqs are enabled, we need to call
896 * preempt_schedule_irq with IRQs off, so we inform lockdep
897 * now that we -did- turn them off already
898 */
899 bl trace_hardirqs_off
900#endif
9994a338 9011: bl preempt_schedule_irq
9778b696 902 CURRENT_THREAD_INFO(r9, r1)
9994a338
PM
903 lwz r3,TI_FLAGS(r9)
904 andi. r0,r3,_TIF_NEED_RESCHED
905 bne- 1b
5d38902c
BH
906#ifdef CONFIG_TRACE_IRQFLAGS
907 /* And now, to properly rebalance the above, we tell lockdep they
908 * are being turned back on, which will happen when we return
909 */
910 bl trace_hardirqs_on
911#endif
9994a338
PM
912#endif /* CONFIG_PREEMPT */
913
914 /* interrupts are hard-disabled at this point */
915restore:
b98ac05d 916#ifdef CONFIG_44x
e7f75ad0
DK
917BEGIN_MMU_FTR_SECTION
918 b 1f
919END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
b98ac05d
BH
920 lis r4,icache_44x_need_flush@ha
921 lwz r5,icache_44x_need_flush@l(r4)
922 cmplwi cr0,r5,0
923 beq+ 1f
924 li r6,0
925 iccci r0,r0
926 stw r6,icache_44x_need_flush@l(r4)
9271:
928#endif /* CONFIG_44x */
5d38902c
BH
929
930 lwz r9,_MSR(r1)
931#ifdef CONFIG_TRACE_IRQFLAGS
932 /* Lockdep doesn't know about the fact that IRQs are temporarily turned
933 * off in this assembly code while peeking at TI_FLAGS() and such. However
934 * we need to inform it if the exception turned interrupts off, and we
 935 * are about to turn them back on.
936 *
 937 * The problem here sadly is that we don't know whether the exception was
938 * one that turned interrupts off or not. So we always tell lockdep about
939 * turning them on here when we go back to wherever we came from with EE
 940 * on, even if that may mean some redundant calls being tracked. Maybe later
941 * we could encode what the exception did somewhere or test the exception
942 * type in the pt_regs but that sounds overkill
943 */
944 andi. r10,r9,MSR_EE
945 beq 1f
06ca2188
SR
946 /*
947 * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
948 * which is the stack frame here, we need to force a stack frame
949 * in case we came from user space.
950 */
951 stwu r1,-32(r1)
952 mflr r0
953 stw r0,4(r1)
954 stwu r1,-32(r1)
5d38902c 955 bl trace_hardirqs_on
06ca2188
SR
956 lwz r1,0(r1)
957 lwz r1,0(r1)
5d38902c
BH
958 lwz r9,_MSR(r1)
9591:
960#endif /* CONFIG_TRACE_IRQFLAGS */
961
9994a338
PM
962 lwz r0,GPR0(r1)
963 lwz r2,GPR2(r1)
964 REST_4GPRS(3, r1)
965 REST_2GPRS(7, r1)
966
967 lwz r10,_XER(r1)
968 lwz r11,_CTR(r1)
969 mtspr SPRN_XER,r10
970 mtctr r11
971
972 PPC405_ERR77(0,r1)
b64f87c1
BB
973BEGIN_FTR_SECTION
974 lwarx r11,0,r1
975END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
9994a338
PM
976 stwcx. r0,0,r1 /* to clear the reservation */
977
978#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
9994a338
PM
979 andi. r10,r9,MSR_RI /* check if this exception occurred */
980 beql nonrecoverable /* at a bad place (MSR:RI = 0) */
981
982 lwz r10,_CCR(r1)
983 lwz r11,_LINK(r1)
984 mtcrf 0xFF,r10
985 mtlr r11
986
987 /*
988 * Once we put values in SRR0 and SRR1, we are in a state
989 * where exceptions are not recoverable, since taking an
990 * exception will trash SRR0 and SRR1. Therefore we clear the
991 * MSR:RI bit to indicate this. If we do take an exception,
992 * we can't return to the point of the exception but we
993 * can restart the exception exit path at the label
994 * exc_exit_restart below. -- paulus
995 */
996 LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
997 SYNC
998 MTMSRD(r10) /* clear the RI bit */
999 .globl exc_exit_restart
1000exc_exit_restart:
9994a338 1001 lwz r12,_NIP(r1)
9994a338
PM
1002 mtspr SPRN_SRR0,r12
1003 mtspr SPRN_SRR1,r9
1004 REST_4GPRS(9, r1)
1005 lwz r1,GPR1(r1)
1006 .globl exc_exit_restart_end
1007exc_exit_restart_end:
1008 SYNC
1009 RFI
1010
1011#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
1012 /*
1013 * This is a bit different on 4xx/Book-E because it doesn't have
1014 * the RI bit in the MSR.
1015 * The TLB miss handler checks if we have interrupted
1016 * the exception exit path and restarts it if so
1017 * (well maybe one day it will... :).
1018 */
1019 lwz r11,_LINK(r1)
1020 mtlr r11
1021 lwz r10,_CCR(r1)
1022 mtcrf 0xff,r10
1023 REST_2GPRS(9, r1)
1024 .globl exc_exit_restart
1025exc_exit_restart:
1026 lwz r11,_NIP(r1)
1027 lwz r12,_MSR(r1)
1028exc_exit_start:
1029 mtspr SPRN_SRR0,r11
1030 mtspr SPRN_SRR1,r12
1031 REST_2GPRS(11, r1)
1032 lwz r1,GPR1(r1)
1033 .globl exc_exit_restart_end
1034exc_exit_restart_end:
1035 PPC405_ERR77_SYNC
1036 rfi
1037 b . /* prevent prefetch past rfi */
1038
1039/*
1040 * Returning from a critical interrupt in user mode doesn't need
1041 * to be any different from a normal exception. For a critical
1042 * interrupt in the kernel, we just return (without checking for
1043 * preemption) since the interrupt may have happened at some crucial
1044 * place (e.g. inside the TLB miss handler), and because we will be
1045 * running with r1 pointing into critical_stack, not the current
1046 * process's kernel stack (and therefore current_thread_info() will
1047 * give the wrong answer).
1048 * We have to restore various SPRs that may have been in use at the
1049 * time of the critical interrupt.
1050 *
1051 */
1052#ifdef CONFIG_40x
1053#define PPC_40x_TURN_OFF_MSR_DR \
1054 /* avoid any possible TLB misses here by turning off MSR.DR, we \
1055 * assume the instructions here are mapped by a pinned TLB entry */ \
1056 li r10,MSR_IR; \
1057 mtmsr r10; \
1058 isync; \
1059 tophys(r1, r1);
1060#else
1061#define PPC_40x_TURN_OFF_MSR_DR
1062#endif
1063
1064#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi) \
1065 REST_NVGPRS(r1); \
1066 lwz r3,_MSR(r1); \
1067 andi. r3,r3,MSR_PR; \
1068 LOAD_MSR_KERNEL(r10,MSR_KERNEL); \
1069 bne user_exc_return; \
1070 lwz r0,GPR0(r1); \
1071 lwz r2,GPR2(r1); \
1072 REST_4GPRS(3, r1); \
1073 REST_2GPRS(7, r1); \
1074 lwz r10,_XER(r1); \
1075 lwz r11,_CTR(r1); \
1076 mtspr SPRN_XER,r10; \
1077 mtctr r11; \
1078 PPC405_ERR77(0,r1); \
1079 stwcx. r0,0,r1; /* to clear the reservation */ \
1080 lwz r11,_LINK(r1); \
1081 mtlr r11; \
1082 lwz r10,_CCR(r1); \
1083 mtcrf 0xff,r10; \
1084 PPC_40x_TURN_OFF_MSR_DR; \
1085 lwz r9,_DEAR(r1); \
1086 lwz r10,_ESR(r1); \
1087 mtspr SPRN_DEAR,r9; \
1088 mtspr SPRN_ESR,r10; \
1089 lwz r11,_NIP(r1); \
1090 lwz r12,_MSR(r1); \
1091 mtspr exc_lvl_srr0,r11; \
1092 mtspr exc_lvl_srr1,r12; \
1093 lwz r9,GPR9(r1); \
1094 lwz r12,GPR12(r1); \
1095 lwz r10,GPR10(r1); \
1096 lwz r11,GPR11(r1); \
1097 lwz r1,GPR1(r1); \
1098 PPC405_ERR77_SYNC; \
1099 exc_lvl_rfi; \
1100 b .; /* prevent prefetch past exc_lvl_rfi */
1101
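/*
 * Helpers for the exception-level return paths below: RESTORE_xSRR
 * reloads a saved srr0/srr1 pair from the exception frame into the
 * corresponding SPRs, and RESTORE_MMU_REGS puts back the MMU state
 * (MAS registers for Book3E MMUs, MMUCR on 44x) that
 * crit_transfer_to_handler saved on entry; elsewhere it expands to
 * nothing.
 */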
fca622c5
KG
1102#define RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1) \
1103 lwz r9,_##exc_lvl_srr0(r1); \
1104 lwz r10,_##exc_lvl_srr1(r1); \
1105 mtspr SPRN_##exc_lvl_srr0,r9; \
1106 mtspr SPRN_##exc_lvl_srr1,r10;
1107
70fe3af8 1108#if defined(CONFIG_PPC_BOOK3E_MMU)
fca622c5
KG
1109#ifdef CONFIG_PHYS_64BIT
1110#define RESTORE_MAS7 \
1111 lwz r11,MAS7(r1); \
1112 mtspr SPRN_MAS7,r11;
1113#else
1114#define RESTORE_MAS7
1115#endif /* CONFIG_PHYS_64BIT */
1116#define RESTORE_MMU_REGS \
1117 lwz r9,MAS0(r1); \
1118 lwz r10,MAS1(r1); \
1119 lwz r11,MAS2(r1); \
1120 mtspr SPRN_MAS0,r9; \
1121 lwz r9,MAS3(r1); \
1122 mtspr SPRN_MAS1,r10; \
1123 lwz r10,MAS6(r1); \
1124 mtspr SPRN_MAS2,r11; \
1125 mtspr SPRN_MAS3,r9; \
1126 mtspr SPRN_MAS6,r10; \
1127 RESTORE_MAS7;
1128#elif defined(CONFIG_44x)
1129#define RESTORE_MMU_REGS \
1130 lwz r9,MMUCR(r1); \
1131 mtspr SPRN_MMUCR,r9;
1132#else
1133#define RESTORE_MMU_REGS
1134#endif
1135
1136#ifdef CONFIG_40x
9994a338
PM
1137 .globl ret_from_crit_exc
1138ret_from_crit_exc:
ee43eb78 1139 mfspr r9,SPRN_SPRG_THREAD
fca622c5
KG
1140 lis r10,saved_ksp_limit@ha;
1141 lwz r10,saved_ksp_limit@l(r10);
1142 tovirt(r9,r9);
1143 stw r10,KSP_LIMIT(r9)
1144 lis r9,crit_srr0@ha;
1145 lwz r9,crit_srr0@l(r9);
1146 lis r10,crit_srr1@ha;
1147 lwz r10,crit_srr1@l(r10);
1148 mtspr SPRN_SRR0,r9;
1149 mtspr SPRN_SRR1,r10;
16c57b36 1150 RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
fca622c5 1151#endif /* CONFIG_40x */
9994a338
PM
1152
1153#ifdef CONFIG_BOOKE
fca622c5
KG
1154 .globl ret_from_crit_exc
1155ret_from_crit_exc:
ee43eb78 1156 mfspr r9,SPRN_SPRG_THREAD
fca622c5
KG
1157 lwz r10,SAVED_KSP_LIMIT(r1)
1158 stw r10,KSP_LIMIT(r9)
1159 RESTORE_xSRR(SRR0,SRR1);
1160 RESTORE_MMU_REGS;
16c57b36 1161 RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
fca622c5 1162
9994a338
PM
1163 .globl ret_from_debug_exc
1164ret_from_debug_exc:
ee43eb78 1165 mfspr r9,SPRN_SPRG_THREAD
fca622c5
KG
1166 lwz r10,SAVED_KSP_LIMIT(r1)
1167 stw r10,KSP_LIMIT(r9)
8c1fc5ab 1168 lwz r9,TASK_STACK-THREAD(r9)
9778b696 1169 CURRENT_THREAD_INFO(r10, r1)
fca622c5
KG
1170 lwz r10,TI_PREEMPT(r10)
1171 stw r10,TI_PREEMPT(r9)
1172 RESTORE_xSRR(SRR0,SRR1);
1173 RESTORE_xSRR(CSRR0,CSRR1);
1174 RESTORE_MMU_REGS;
16c57b36 1175 RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
9994a338
PM
1176
1177 .globl ret_from_mcheck_exc
1178ret_from_mcheck_exc:
ee43eb78 1179 mfspr r9,SPRN_SPRG_THREAD
fca622c5
KG
1180 lwz r10,SAVED_KSP_LIMIT(r1)
1181 stw r10,KSP_LIMIT(r9)
1182 RESTORE_xSRR(SRR0,SRR1);
1183 RESTORE_xSRR(CSRR0,CSRR1);
1184 RESTORE_xSRR(DSRR0,DSRR1);
1185 RESTORE_MMU_REGS;
16c57b36 1186 RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
9994a338
PM
1187#endif /* CONFIG_BOOKE */
1188
1189/*
1190 * Load the DBCR0 value for a task that is being ptraced,
1191 * having first saved away the global DBCR0. Note that r0
1192 * has the dbcr0 value to set upon entry to this.
1193 */
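/*
 * global_dbcr0 appears to hold an 8-byte slot per CPU: word 0 keeps the
 * kernel's DBCR0 value while a ptraced task's value is loaded, and
 * word 1 counts how many times that has happened (it is decremented
 * again on the kernel entry path in transfer_to_handler).
 */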
1194load_dbcr0:
1195 mfmsr r10 /* first disable debug exceptions */
1196 rlwinm r10,r10,0,~MSR_DE
1197 mtmsr r10
1198 isync
1199 mfspr r10,SPRN_DBCR0
1200 lis r11,global_dbcr0@ha
1201 addi r11,r11,global_dbcr0@l
4eaddb4d 1202#ifdef CONFIG_SMP
9778b696 1203 CURRENT_THREAD_INFO(r9, r1)
4eaddb4d
KG
1204 lwz r9,TI_CPU(r9)
1205 slwi r9,r9,3
1206 add r11,r11,r9
1207#endif
9994a338
PM
1208 stw r10,0(r11)
1209 mtspr SPRN_DBCR0,r0
1210 lwz r10,4(r11)
1211 addi r10,r10,1
1212 stw r10,4(r11)
1213 li r11,-1
1214 mtspr SPRN_DBSR,r11 /* clear all pending debug events */
1215 blr
1216
991eb43a
KG
1217 .section .bss
1218 .align 4
1219global_dbcr0:
4eaddb4d 1220 .space 8*NR_CPUS
991eb43a 1221 .previous
9994a338
PM
1222#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
1223
1224do_work: /* r10 contains MSR_KERNEL here */
1225 andi. r0,r9,_TIF_NEED_RESCHED
1226 beq do_user_signal
1227
1228do_resched: /* r10 contains MSR_KERNEL here */
5d38902c
BH
1229 /* Note: We don't need to inform lockdep that we are enabling
1230 * interrupts here. As far as it knows, they are already enabled
1231 */
9994a338
PM
1232 ori r10,r10,MSR_EE
1233 SYNC
1234 MTMSRD(r10) /* hard-enable interrupts */
1235 bl schedule
1236recheck:
5d38902c
BH
 1237 /* Note: Nor do we tell it that we are disabling them again.
 1238 * The disable/enable cycles used to peek at
 1239 * TI_FLAGS aren't advertised.
1240 */
9994a338
PM
1241 LOAD_MSR_KERNEL(r10,MSR_KERNEL)
1242 SYNC
1243 MTMSRD(r10) /* disable interrupts */
9778b696 1244 CURRENT_THREAD_INFO(r9, r1)
9994a338
PM
1245 lwz r9,TI_FLAGS(r9)
1246 andi. r0,r9,_TIF_NEED_RESCHED
1247 bne- do_resched
7a10174e 1248 andi. r0,r9,_TIF_USER_WORK_MASK
9994a338
PM
1249 beq restore_user
1250do_user_signal: /* r10 contains MSR_KERNEL here */
1251 ori r10,r10,MSR_EE
1252 SYNC
1253 MTMSRD(r10) /* hard-enable interrupts */
1254 /* save r13-r31 in the exception frame, if not already done */
d73e0c99 1255 lwz r3,_TRAP(r1)
9994a338
PM
1256 andi. r0,r3,1
1257 beq 2f
1258 SAVE_NVGPRS(r1)
1259 rlwinm r3,r3,0,0,30
d73e0c99 1260 stw r3,_TRAP(r1)
7d6d637d
RM
12612: addi r3,r1,STACK_FRAME_OVERHEAD
1262 mr r4,r9
18b246fa 1263 bl do_notify_resume
9994a338
PM
1264 REST_NVGPRS(r1)
1265 b recheck
1266
1267/*
1268 * We come here when we are at the end of handling an exception
1269 * that occurred at a place where taking an exception will lose
1270 * state information, such as the contents of SRR0 and SRR1.
1271 */
1272nonrecoverable:
1273 lis r10,exc_exit_restart_end@ha
1274 addi r10,r10,exc_exit_restart_end@l
1275 cmplw r12,r10
1276 bge 3f
1277 lis r11,exc_exit_restart@ha
1278 addi r11,r11,exc_exit_restart@l
1279 cmplw r12,r11
1280 blt 3f
1281 lis r10,ee_restarts@ha
1282 lwz r12,ee_restarts@l(r10)
1283 addi r12,r12,1
1284 stw r12,ee_restarts@l(r10)
1285 mr r12,r11 /* restart at exc_exit_restart */
1286 blr
12873: /* OK, we can't recover, kill this process */
1288 /* but the 601 doesn't implement the RI bit, so assume it's OK */
1289BEGIN_FTR_SECTION
1290 blr
1291END_FTR_SECTION_IFSET(CPU_FTR_601)
d73e0c99 1292 lwz r3,_TRAP(r1)
9994a338
PM
1293 andi. r0,r3,1
1294 beq 4f
1295 SAVE_NVGPRS(r1)
1296 rlwinm r3,r3,0,0,30
d73e0c99 1297 stw r3,_TRAP(r1)
9994a338 12984: addi r3,r1,STACK_FRAME_OVERHEAD
51423a9c 1299 bl unrecoverable_exception
9994a338
PM
1300 /* shouldn't return */
1301 b 4b
1302
991eb43a
KG
1303 .section .bss
1304 .align 2
1305ee_restarts:
1306 .space 4
1307 .previous
9994a338
PM
1308
1309/*
1310 * PROM code for specific machines follows. Put it
1311 * here so it's easy to add arch-specific sections later.
1312 * -- Cort
1313 */
033ef338 1314#ifdef CONFIG_PPC_RTAS
9994a338
PM
1315/*
1316 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
1317 * called with the MMU off.
1318 */
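/*
 * enter_rtas is entered with r3 expected to hold the physical address
 * of the RTAS argument buffer (it is passed through to RTAS untouched).
 * The MMU is turned off around the call and the saved MSR and stack
 * frame are restored at label 1: below.
 */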
1319_GLOBAL(enter_rtas)
1320 stwu r1,-INT_FRAME_SIZE(r1)
1321 mflr r0
1322 stw r0,INT_FRAME_SIZE+4(r1)
e58c3495 1323 LOAD_REG_ADDR(r4, rtas)
9994a338
PM
1324 lis r6,1f@ha /* physical return address for rtas */
1325 addi r6,r6,1f@l
1326 tophys(r6,r6)
1327 tophys(r7,r1)
033ef338
PM
1328 lwz r8,RTASENTRY(r4)
1329 lwz r4,RTASBASE(r4)
9994a338
PM
1330 mfmsr r9
1331 stw r9,8(r1)
1332 LOAD_MSR_KERNEL(r0,MSR_KERNEL)
1333 SYNC /* disable interrupts so SRR0/1 */
1334 MTMSRD(r0) /* don't get trashed */
1335 li r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1336 mtlr r6
0df977ea 1337 stw r7, THREAD + RTAS_SP(r2)
9994a338
PM
1338 mtspr SPRN_SRR0,r8
1339 mtspr SPRN_SRR1,r9
1340 RFI
13411: tophys(r9,r1)
1342 lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */
1343 lwz r9,8(r9) /* original msr value */
9994a338
PM
1344 addi r1,r1,INT_FRAME_SIZE
1345 li r0,0
0df977ea
CL
1346 tophys(r7, r2)
1347 stw r0, THREAD + RTAS_SP(r7)
9994a338
PM
1348 mtspr SPRN_SRR0,r8
1349 mtspr SPRN_SRR1,r9
1350 RFI /* return to caller */
1351
1352 .globl machine_check_in_rtas
1353machine_check_in_rtas:
1354 twi 31,0,0
1355 /* XXX load up BATs and panic */
1356
033ef338 1357#endif /* CONFIG_PPC_RTAS */