/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *  Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 *  Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/ftrace.h>

/*
 * System calls.
 */
	.section	".toc","aw"
.SYS_CALL_TABLE:
	.tc .sys_call_table[TC],.sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
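	/* Unwinders such as show_stack() look for this marker word in a
	 * frame to tell exception frames (which carry a full pt_regs)
	 * from ordinary C stack frames.
	 */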

	.section	".text"
	.align 7

#undef SHOW_SYSCALLS

	.globl system_call_common
system_call_common:
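	/*
	 * A sketch of the register state on entry, as implied by the
	 * stores below (it is set up by the system call exception
	 * prologue): r0 = syscall number, r3-r8 = arguments, r9 = user
	 * r13, r11 = NIP (SRR0), r12 = MSR (SRR1), r13 = PACA.  If we
	 * came from user mode (MSR_PR set), switch to the kernel stack
	 * saved in PACAKSAVE; otherwise r1 already points at a kernel
	 * stack and we just carve out an exception frame.
	 */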
	andi.	r10,r12,MSR_PR
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f
	ld	r1,PACAKSAVE(r13)
1:	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	ACCOUNT_CPU_USER_ENTRY(r10, r11)
	/*
	 * This "crclr so" clears CR0.SO, which is the error indication on
	 * return from this system call.  There must be no cmp instruction
	 * between it and the "mfcr r9" below, otherwise if XER.SO is set,
	 * CR0.SO will get set, causing all system calls to appear to fail.
	 */
	crclr	so
	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	mfcr	r9
	mflr	r10
	li	r11,0xc01
	std	r9,_CCR(r1)
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	mfxer	r9
	mfctr	r10
	std	r9,_XER(r1)
	std	r10,_CTR(r1)
	std	r3,ORIG_GPR3(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	.trace_hardirqs_on
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r12,_MSR(r1)
#endif /* CONFIG_TRACE_IRQFLAGS */
	li	r10,1
	stb	r10,PACASOFTIRQEN(r13)
	stb	r10,PACAHARDIRQEN(r13)
	std	r10,SOFTE(r1)
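	/* The three stores above mark interrupts as both soft- and
	 * hard-enabled in the PACA, and record the soft state in the
	 * frame, ahead of actually setting MSR_EE below.
	 */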
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	/* Hack for handling interrupts when soft-enabling on iSeries */
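	/*
	 * A sketch of the convention, as far as this file shows it:
	 * "syscall" 0x5555 issued from kernel mode is not a real system
	 * call but a request to run the pending-interrupt path, so it
	 * is redirected to hardware_interrupt_entry below.
	 */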
	cmpdi	cr1,r0,0x5555		/* syscall 0x5555 */
	andi.	r10,r12,MSR_PR		/* from kernel */
	crand	4*cr0+eq,4*cr1+eq,4*cr0+eq
	bne	2f
	b	hardware_interrupt_entry
2:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */

	/* Hard enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	mfmsr	r11
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
#endif
	clrrdi	r11,r1,THREAD_SHIFT	/* current_thread_info() */
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys

system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to 32 Bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
	ld	r11,.SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
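	/*
	 * Each sys_call_table slot is 16 bytes: the 64-bit handler
	 * followed by its 32-bit counterpart (hence the "addi r11,r11,8"
	 * above and the shift by 4 below).
	 */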
	slwi	r0,r0,4
	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
	mtctr	r10
	bctrl			/* Call handler */

syscall_exit:
	std	r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall_exit
	ld	r3,RESULT(r1)
#endif
	clrrdi	r12,r1,THREAD_SHIFT

	ld	r8,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
#endif

	/* Disable interrupts so current_thread_info()->flags can't change,
	 * and so that we don't get interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
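	/*
	 * The rldicl/rotldi pair below clears MSR_EE without needing a
	 * scratch mask: rotating left by 48 brings bit 15 (EE) to the
	 * top, the rldicl mask of 1 clears that top bit, and the further
	 * rotate by 16 (48 + 16 = 64) puts everything back in place.
	 */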
	mfmsr	r10
	rldicl	r10,r10,48,1
	rotldi	r10,r10,16
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	ld	r9,TI_FLAGS(r12)
	li	r11,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
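	/*
	 * Unsigned compare against -_LAST_ERRNO: return values in
	 * [-_LAST_ERRNO, -1] are -errno and must go back to userspace
	 * negated (as a positive errno) with CR0.SO set.
	 */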
	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	syscall_error
syscall_error_cont:
	ld	r7,_NIP(r1)
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)
	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
	li	r12,MSR_RI
	andc	r11,r10,r12
	mtmsrd	r11,1			/* clear MSR.RI */
#endif /* CONFIG_PPC_BOOK3S */

	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r11, r12)
	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
1:	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	RFI
	b	.	/* prevent speculative execution */

syscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3
	std	r5,_CCR(r1)
	b	syscall_error_cont

/* Traced system call support */
syscall_dotrace:
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_enter
	/*
	 * Restore argument registers possibly just changed.
	 * We use the return value of do_syscall_trace_enter
	 * for the call number to look up in the table (r0).
	 */
	mr	r0,r3
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	clrrdi	r10,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r10)
	b	syscall_dotrace_cont

syscall_enosys:
	li	r3,-ENOSYS
	b	syscall_exit

syscall_exit_work:
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	   If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 is -LAST_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS
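	/* The ldarx/stdcx. loop above clears the flags atomically:
	 * TI_FLAGS can be updated concurrently (e.g. from interrupt
	 * context), so a plain read-modify-write could lose bits.
	 */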

4:	/* Anything else left to do? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	.ret_from_except_lite

	/* Re-enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	mfmsr	r10
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_leave
	b	.ret_from_except

/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
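	/*
	 * Convention used here: the low bit of the frame's _TRAP word is
	 * set while the non-volatile GPRs are *not* saved in the frame;
	 * clearing it below marks the frame as full.
	 */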
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr

/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	.save_nvgprs
	bl	.sys_fork
	b	syscall_exit

_GLOBAL(ppc_vfork)
	bl	.save_nvgprs
	bl	.sys_vfork
	b	syscall_exit

_GLOBAL(ppc_clone)
	bl	.save_nvgprs
	bl	.sys_clone
	b	syscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	.save_nvgprs
	bl	.compat_sys_swapcontext
	b	syscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	.save_nvgprs
	bl	.sys_swapcontext
	b	syscall_exit

_GLOBAL(ret_from_fork)
	bl	.schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	syscall_exit

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20		/* Return to switch caller */
	mfmsr	r22
	li	r0, MSR_FP
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VSX@h	/* Disable VSX */
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	and.	r0,r0,r22
	beq+	1f
	andc	r22,r22,r0
	MTMSRD(r22)
	isync
1:	std	r20,_NIP(r1)
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	/*
	 * If we optimise away the clear of the reservation in system
	 * calls because we know the CPU tracks the address of the
	 * reservation, then we need to clear it here to cover the
	 * case that the kernel context switch path has no larx
	 * instructions.
	 */
BEGIN_FTR_SECTION
	ldarx	r6,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
#ifdef CONFIG_PPC_BOOK3S
BEGIN_FTR_SECTION
  BEGIN_FTR_SECTION_NESTED(95)
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
  FTR_SECTION_ELSE_NESTED(95)
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
  ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_1T_SEGMENT, 95)
FTR_SECTION_ELSE
	b	2f
ALT_FTR_SECTION_END_IFSET(CPU_FTR_SLB)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
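	/*
	 * The low bits of the ESID word double as the SLB index for
	 * slbmte: the kernel stack entry always goes in the last bolted
	 * slot, SLB_NUM_BOLTED-1.
	 */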
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB.  No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
	std	r7,SLBSHADOW_STACKVSID(r9)  /* Save VSID */
	std	r0,SLBSHADOW_STACKESID(r9)  /* Save ESID */

	/* No need to check for CPU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync

2:
#endif /* CONFIG_PPC_BOOK3S */

	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	.ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	mfmsr	r10		/* Get current interrupt state */
	rldicl	r9,r10,48,1	/* clear MSR_EE */
	rotldi	r9,r9,16
	mtmsrd	r9,1		/* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */

#ifdef CONFIG_PREEMPT
	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
	li	r0,_TIF_NEED_RESCHED	/* bits to check */
	ld	r3,_MSR(r1)
	ld	r4,TI_FLAGS(r9)
	/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
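	/* (so that pending signals count as work only when returning to
	 * user mode; kernel-mode returns check just NEED_RESCHED) */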
	rlwimi	r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
	and.	r0,r4,r0	/* check NEED_RESCHED and maybe SIGPENDING */
	bne	do_work

#else /* !CONFIG_PREEMPT */
	ld	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r3,r3,MSR_PR
	beq	restore		/* if not, just restore regs and return */

	/* Check current_thread_info()->flags */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	do_work
#endif

restore:
BEGIN_FW_FTR_SECTION
	ld	r5,SOFTE(r1)
FW_FTR_SECTION_ELSE
	b	.Liseries_check_pending_irqs
ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
2:
	TRACE_AND_RESTORE_IRQ(r5);

	/* extract EE bit and use it to restore paca->hard_enabled */
	ld	r3,_MSR(r1)
	rldicl	r4,r3,49,63		/* r4 = (r3 >> 15) & 1 */
	stb	r4,PACAHARDIRQEN(r13)

#ifdef CONFIG_PPC_BOOK3E
	b	.exception_return_book3e
#else
	ld	r4,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r4
	mtlr	r0
	ld	r4,_XER(r1)
	mtspr	SPRN_XER,r4

	REST_8GPRS(5, r1)

	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore

	/*
	 * Clear the reservation. If we know the CPU tracks the address of
	 * the reservation then we can potentially save some cycles and use
	 * a larx. On POWER6 and POWER7 this is significantly faster.
	 */
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r4,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	mfmsr	r4
	andc	r4,r4,r0	/* r0 contains MSR_RI here */
	mtmsrd	r4,1

	/*
	 * r13 is our per cpu area, only restore it if we are returning to
	 * userspace
	 */
	andi.	r0,r3,MSR_PR
	beq	1f
	ACCOUNT_CPU_USER_EXIT(r2, r4)
	REST_GPR(13, r1)
1:
	mtspr	SPRN_SRR1,r3

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid
	b	.	/* prevent speculative execution */

#endif /* CONFIG_PPC_BOOK3E */

.Liseries_check_pending_irqs:
#ifdef CONFIG_PPC_ISERIES
	ld	r5,SOFTE(r1)
	cmpdi	0,r5,0
	beq	2b
	/* Check for pending interrupts (iSeries) */
	ld	r3,PACALPPACAPTR(r13)
	ld	r3,LPPACAANYINT(r3)
	cmpdi	r3,0
	beq+	2b		/* skip do_IRQ if no interrupts */

	li	r3,0
	stb	r3,PACASOFTIRQEN(r13)	/* ensure we are soft-disabled */
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	.trace_hardirqs_off
	mfmsr	r10
#endif
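	/* Without TRACE_IRQFLAGS, r10 still holds the MSR value read in
	 * ret_from_except_lite; the trace call above can clobber the
	 * volatile registers, hence the re-read of the MSR there.
	 */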
	ori	r10,r10,MSR_EE
	mtmsrd	r10			/* hard-enable again */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite		/* loop back and handle more */
#endif

do_work:
#ifdef CONFIG_PREEMPT
	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
	bne	user_work
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
	crandc	eq,cr1*4+eq,eq
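	/* cr0.eq now means "preempt_count() == 0 && soft-enabled";
	 * anything else and we must not preempt here. */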
	bne	restore

	/* Here we are preempting the current task.
	 *
	 * Ensure interrupts are soft-disabled. We also properly mark
	 * the PACA to reflect the fact that they are hard-disabled
	 * and trace the change
	 */
	li	r0,0
	stb	r0,PACASOFTIRQEN(r13)
	stb	r0,PACAHARDIRQEN(r13)
	TRACE_DISABLE_INTS

	/* Call the scheduler with soft IRQs off */
1:	bl	.preempt_schedule_irq

	/* Hard-disable interrupts again (and update PACA) */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	mfmsr	r10
	rldicl	r10,r10,48,1
	rotldi	r10,r10,16
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */
	li	r0,0
	stb	r0,PACAHARDIRQEN(r13)

	/* Re-test flags and eventually loop */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b
	b	restore

user_work:
#endif /* CONFIG_PREEMPT */

	/* Enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	bl	.schedule
	b	.ret_from_except_lite

1:	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_signal
	b	.ret_from_except

unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	unrecov_restore

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACASOFTIRQEN(r13)
1:	tdnei	r0,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

	/* Hard-disable interrupts */
	mfmsr	r6
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,.rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4

	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI
	andc	r6,r0,r9
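	/* At this point r0 is the current MSR with EE/SE/BE/RI cleared
	 * (used while we set up SRR0/1 below), and r6 is the MSR RTAS
	 * will run with: additionally SF, IR/DR, FP and RI off, i.e.
	 * 32-bit real mode.
	 */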
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_return_loc)
	/* relocation is off at this point */
	mfspr	r4,SPRN_SPRG_PACA	/* Get PACA */
	clrldi	r4,r4,2			/* convert to realmode address */
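	/* The "bcl 20,31,$+4" idiom drops the address of the following
	 * instruction into LR: a position-independent way to locate the
	 * constant at 1f while translation is off.
	 */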
	bcl	20,31,$+4
0:	mflr	r3
	ld	r3,(1f-0b)(r3)		/* get &.rtas_restore_regs */

	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync
	mtmsrd	r6

	ld	r1,PACAR1(r4)		/* Restore our SP */
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

	.align	3
1:	.llong	.rtas_restore_regs

_STATIC(rtas_restore_regs)
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	mfspr	r13,SPRN_SPRG_PACA

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r10
	mfmsr	r11
	std	r10,_CCR(r1)
	std	r11,_MSR(r1)

	/* Get the PROM entrypoint */
	mtlr	r4

	/* Switch MSR to 32 bits mode
	 */
#ifdef CONFIG_PPC_BOOK3E
	rlwinm	r11,r11,0,1,31
	mtmsr	r11
#else /* CONFIG_PPC_BOOK3E */
	mfmsr	r11
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
	andc	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
	andc	r11,r11,r12
	mtmsrd	r11
#endif /* CONFIG_PPC_BOOK3E */
	isync

	/* Enter PROM here... */
	blrl

	/* Just make sure that r1 top 32 bits didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	MTMSRD(r0)
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4

	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
	blr

_GLOBAL(ftrace_caller)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)
	subi	r3, r3, MCOUNT_INSN_SIZE
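	/* r3 is now the address of the mcount call site in the traced
	 * function; r4 is the traced function's caller, taken from the
	 * LR save word (offset 16) of the previous stack frame.
	 */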
.globl ftrace_call
ftrace_call:
	bl	ftrace_stub
	nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
	b	ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
#endif
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr
#else
_GLOBAL(mcount)
	blr

_GLOBAL(_mcount)
	/* Taken from output of objdump from lib64/glibc */
	mflr	r3
	ld	r11, 0(r1)
	stdu	r1, -112(r1)
	std	r3, 128(r1)
	ld	r4, 16(r11)

	subi	r3, r3, MCOUNT_INSN_SIZE
	LOAD_REG_ADDR(r5,ftrace_trace_function)
	ld	r5,0(r5)	/* dereference the pointer variable... */
	ld	r5,0(r5)	/* ...then the function descriptor (ELFv1 ABI) */
	mtctr	r5
	bctrl
	nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	b	ftrace_graph_caller
#endif
	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
_GLOBAL(ftrace_stub)
	blr

#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
_GLOBAL(ftrace_graph_caller)
	/* load r4 with local address */
	ld	r4, 128(r1)
	subi	r4, r4, MCOUNT_INSN_SIZE

	/* get the parent address */
	ld	r11, 112(r1)
	addi	r3, r11, 16

	bl	.prepare_ftrace_return
	nop

	ld	r0, 128(r1)
	mtlr	r0
	addi	r1, r1, 112
	blr

_GLOBAL(return_to_handler)
	/* need to save return values */
	std	r4,  -24(r1)
	std	r3,  -16(r1)
	std	r31, -8(r1)
	mr	r31, r1
	stdu	r1, -112(r1)

	bl	.ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	ld	r1, 0(r1)
	ld	r4,  -24(r1)
	ld	r3,  -16(r1)
	ld	r31, -8(r1)

	/* Jump back to real return address */
	blr

_GLOBAL(mod_return_to_handler)
	/* need to save return values */
	std	r4,  -32(r1)
	std	r3,  -24(r1)
	/* save TOC */
	std	r2,  -16(r1)
	std	r31, -8(r1)
	mr	r31, r1
	stdu	r1, -112(r1)

	/*
	 * We are in a module using the module's TOC.
	 * Switch to our TOC to run inside the core kernel.
	 */
	ld	r2, PACATOC(r13)

	bl	.ftrace_return_to_handler
	nop

	/* return value has real return address */
	mtlr	r3

	ld	r1, 0(r1)
	ld	r4,  -32(r1)
	ld	r3,  -24(r1)
	ld	r2,  -16(r1)
	ld	r31, -8(r1)

	/* Jump back to real return address */
	blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */