ftrace, POWERPC: add irqs_disabled_flags to ppc
[linux-2.6-block.git] arch/powerpc/kernel/entry_64.S
/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>

/*
 * System calls.
 */
	.section	".toc","aw"
.SYS_CALL_TABLE:
	.tc .sys_call_table[TC],.sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

	.section	".text"
	.align 7

#undef SHOW_SYSCALLS

	.globl system_call_common
system_call_common:
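	/*
	 * On entry (as set up by the system call exception prolog):
	 * r0 holds the syscall number, r3-r8 the arguments, r9 the
	 * caller's r13, r11/r12 the saved SRR0/SRR1 (NIP and MSR),
	 * and r13 the PACA -- this follows from the stores below.
	 */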
	andi.	r10,r12,MSR_PR
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f
	ld	r1,PACAKSAVE(r13)
1:	std	r10,0(r1)
	crclr	so
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
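	/*
	 * ACCOUNT_CPU_USER_ENTRY (asm/ppc_asm.h) charges the time since
	 * the last kernel exit to user time when CONFIG_VIRT_CPU_ACCOUNTING
	 * is enabled; it expands to nothing otherwise.
	 */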
	ACCOUNT_CPU_USER_ENTRY(r10, r11)
	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	mfcr	r9
	mflr	r10
	li	r11,0xc01
	std	r9,_CCR(r1)
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	mfxer	r9
	mfctr	r10
	std	r9,_XER(r1)
	std	r10,_CTR(r1)
	std	r3,ORIG_GPR3(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
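	/*
	 * Tell lockdep that interrupts are about to be re-enabled.
	 * trace_hardirqs_on is a C call, so the volatile registers
	 * holding the syscall arguments are reloaded from the frame
	 * afterwards.
	 */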
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	.trace_hardirqs_on
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r12,_MSR(r1)
#endif /* CONFIG_TRACE_IRQFLAGS */
	li	r10,1
	stb	r10,PACASOFTIRQEN(r13)
	stb	r10,PACAHARDIRQEN(r13)
	std	r10,SOFTE(r1)
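	/*
	 * PACASOFTIRQEN/PACAHARDIRQEN carry the 64-bit lazy interrupt
	 * masking state: mark both soft- and hard-enabled, and record
	 * the soft-enable state in the frame (SOFTE) so it is restored
	 * on exception exit.  Syscalls run with interrupts enabled.
	 */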
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	/* Hack for handling interrupts when soft-enabling on iSeries */
	cmpdi	cr1,r0,0x5555		/* syscall 0x5555 */
	andi.	r10,r12,MSR_PR		/* from kernel */
	crand	4*cr0+eq,4*cr1+eq,4*cr0+eq
	bne	2f
	b	hardware_interrupt_entry
2:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */
	mfmsr	r11
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1

#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
#endif
	clrrdi	r11,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys

system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to 32 Bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
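/*
 * Each sys_call_table slot appears to hold the 64-bit entry with the
 * corresponding 32-bit entry 8 bytes after it: hence the 16-byte
 * stride (slwi by 4) and the +8 adjustment for 32-bit tasks below.
 */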
	ld	r11,.SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
	slwi	r0,r0,4
	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
	mtctr	r10
	bctrl			/* Call handler */

syscall_exit:
	std	r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall_exit
	ld	r3,RESULT(r1)
#endif
	clrrdi	r12,r1,THREAD_SHIFT

	/* disable interrupts so current_thread_info()->flags can't change,
	   and so that we don't get interrupted after loading SRR0/1. */
	ld	r8,_MSR(r1)
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
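	/*
	 * Clear MSR_EE without disturbing the rest of the MSR: the
	 * rldicl rotates EE up to the top bit and masks it off, and
	 * the rotldi completes the 64-bit rotation back to where it was.
	 */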
	mfmsr	r10
	rldicl	r10,r10,48,1
	rotldi	r10,r10,16
	mtmsrd	r10,1
	ld	r9,TI_FLAGS(r12)
	li	r11,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
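	/*
	 * Return values in [-_LAST_ERRNO, -1] denote errors: negate r3
	 * and set the CR0.SO bit that userspace tests for failure.
	 */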
	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	syscall_error
syscall_error_cont:
	ld	r7,_NIP(r1)
	stdcx.	r0,0,r1			/* to clear the reservation */
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)
	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	li	r12,MSR_RI
	andc	r11,r10,r12
	mtmsrd	r11,1			/* clear MSR.RI */
	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r11, r12)
	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
1:	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	rfid
	b	.	/* prevent speculative execution */

syscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3
	std	r5,_CCR(r1)
	b	syscall_error_cont

/* Traced system call support */
syscall_dotrace:
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_enter
	/* Reload the syscall number and arguments, which the tracer
	 * may have modified via pt_regs */
	ld	r0,GPR0(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	clrrdi	r10,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r10)
	b	syscall_dotrace_cont

syscall_enosys:
	li	r3,-ENOSYS
	b	syscall_exit

syscall_exit_work:
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	 If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 is -LAST_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */
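	/*
	 * TI_FLAGS is cleared with an ldarx/stdcx. retry loop so that a
	 * concurrent flag update is not lost; r12 is temporarily
	 * advanced to point at the flags word.
	 */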

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything else left to do? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	.ret_from_except_lite

	/* Re-enable interrupts */
	mfmsr	r10
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1

	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_leave
	b	.ret_from_except

/* Save non-volatile GPRs, if not already saved. */
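/* The low bit of _TRAP marks that state: it is set while r14-r31 are
 * still unsaved and cleared below once SAVE_NVGPRS has run, so a
 * second call returns immediately. */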
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr

/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	.save_nvgprs
	bl	.sys_fork
	b	syscall_exit

_GLOBAL(ppc_vfork)
	bl	.save_nvgprs
	bl	.sys_vfork
	b	syscall_exit

_GLOBAL(ppc_clone)
	bl	.save_nvgprs
	bl	.sys_clone
	b	syscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	.save_nvgprs
	bl	.compat_sys_swapcontext
	b	syscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	.save_nvgprs
	bl	.sys_swapcontext
	b	syscall_exit

_GLOBAL(ret_from_fork)
	bl	.schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	syscall_exit

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20		/* Return to switch caller */
	mfmsr	r22
	li	r0, MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	and.	r0,r0,r22
	beq+	1f
	andc	r22,r22,r0
	mtmsrd	r22
	isync
1:	std	r20,_NIP(r1)
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
BEGIN_FTR_SECTION
	b	2f
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
BEGIN_FTR_SECTION
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
BEGIN_FTR_SECTION
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

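	/*
	 * Stacks in the c000... region are covered by the bolted kernel
	 * SLB entry, and a stack in the segment we are already running
	 * on needs no new entry either; only otherwise do we replace
	 * the old stack's bolted entry below.
	 */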
	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB.  No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
	std	r7,SLBSHADOW_STACKVSID(r9)  /* Save VSID */
	std	r0,SLBSHADOW_STACKESID(r9)  /* Save ESID */

	/* No need to check for CPU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync

2:
	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

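	/*
	 * PACAKSAVE is what system_call_common loads as the kernel
	 * stack pointer when entering from userspace (see the top of
	 * this file), so it must now track the new task's stack.
	 */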
	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	.ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
	mfmsr	r10		/* Get current interrupt state */
	rldicl	r9,r10,48,1	/* clear MSR_EE */
	rotldi	r9,r9,16
	mtmsrd	r9,1		/* Update machine state */

#ifdef CONFIG_PREEMPT
	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
	li	r0,_TIF_NEED_RESCHED	/* bits to check */
	ld	r3,_MSR(r1)
	ld	r4,TI_FLAGS(r9)
	/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
	rlwimi	r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
	and.	r0,r4,r0	/* check NEED_RESCHED and maybe SIGPENDING */
	bne	do_work

#else /* !CONFIG_PREEMPT */
	ld	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r3,r3,MSR_PR
	beq	restore		/* if not, just restore regs and return */

	/* Check current_thread_info()->flags */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	do_work
#endif

restore:
	ld	r5,SOFTE(r1)
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	cmpdi	0,r5,0
	beq	4f
	/* Check for pending interrupts (iSeries) */
	ld	r3,PACALPPACAPTR(r13)
	ld	r3,LPPACAANYINT(r3)
	cmpdi	r3,0
	beq+	4f	/* skip do_IRQ if no interrupts */

	li	r3,0
	stb	r3,PACASOFTIRQEN(r13)	/* ensure we are soft-disabled */
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	.trace_hardirqs_off
	mfmsr	r10
#endif
	ori	r10,r10,MSR_EE
	mtmsrd	r10			/* hard-enable again */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite		/* loop back and handle more */
4:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
	TRACE_AND_RESTORE_IRQ(r5);
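	/*
	 * TRACE_AND_RESTORE_IRQ (asm/irqflags.h) writes the saved
	 * soft-enable state in r5 back to PACASOFTIRQEN, invoking the
	 * lockdep trace hooks first when CONFIG_TRACE_IRQFLAGS is set.
	 */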

	/* extract EE bit and use it to restore paca->hard_enabled */
	ld	r3,_MSR(r1)
	rldicl	r4,r3,49,63		/* r4 = (r3 >> 15) & 1 */
	stb	r4,PACAHARDIRQEN(r13)

	ld	r4,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r4
	mtlr	r0
	ld	r4,_XER(r1)
	mtspr	SPRN_XER,r4

	REST_8GPRS(5, r1)

	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore

	stdcx.	r0,0,r1		/* to clear the reservation */

	/*
	 * Clear RI before restoring r13.  If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	mfmsr	r4
	andc	r4,r4,r0	/* r0 contains MSR_RI here */
	mtmsrd	r4,1

	/*
	 * r13 is our per cpu area, only restore it if we are returning to
	 * userspace
	 */
	andi.	r0,r3,MSR_PR
	beq	1f
	ACCOUNT_CPU_USER_EXIT(r2, r4)
	REST_GPR(13, r1)
1:
	mtspr	SPRN_SRR1,r3

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid
	b	.	/* prevent speculative execution */

do_work:
#ifdef CONFIG_PREEMPT
	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
	bne	user_work
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
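	/* Fold both tests into CR0.EQ: preempt only if preempt_count()
	 * is zero and interrupts were soft-enabled (SOFTE != 0) */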
	crandc	eq,cr1*4+eq,eq
	bne	restore
	/* here we are preempting the current task */
1:
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	.trace_hardirqs_on
	/* Note: we just clobbered r10 which used to contain the previous
	 * MSR before the hard-disabling done by the caller of do_work.
	 * We don't have that value anymore, but it doesn't matter as
	 * we will hard-enable unconditionally, we can just reload the
	 * current MSR into r10
	 */
	mfmsr	r10
#endif /* CONFIG_TRACE_IRQFLAGS */
	li	r0,1
	stb	r0,PACASOFTIRQEN(r13)
	stb	r0,PACAHARDIRQEN(r13)
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1		/* reenable interrupts */
	bl	.preempt_schedule
	mfmsr	r10
	clrrdi	r9,r1,THREAD_SHIFT
	rldicl	r10,r10,48,1	/* disable interrupts again */
	rotldi	r10,r10,16
	mtmsrd	r10,1
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b
	b	restore

user_work:
#endif
	/* Enable interrupts */
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1

	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	bl	.schedule
	b	.ret_from_except_lite

1:	bl	.save_nvgprs
	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	.do_signal
	b	.ret_from_except

unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	unrecov_restore

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACASOFTIRQEN(r13)
1:	tdnei	r0,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

	/* Hard-disable interrupts */
	mfmsr	r6
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,.rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4

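	/*
	 * Build two MSR images: r0 is the current MSR with EE/SE/BE/RI
	 * cleared, used immediately below to quiesce interrupts, while
	 * r6 additionally drops SF, IR/DR and the FP bits (then sets RI)
	 * so that RTAS is entered in 32-bit real mode.
	 */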
	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP
	andc	r6,r0,r9
	ori	r6,r6,MSR_RI
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_return_loc)
	/* relocation is off at this point */
	mfspr	r4,SPRN_SPRG3		/* Get PACA */
	clrldi	r4,r4,2			/* convert to realmode address */

	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync
	mtmsrd	r6

	ld	r1,PACAR1(r4)		/* Restore our SP */
	LOAD_REG_IMMEDIATE(r3,.rtas_restore_regs)
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_restore_regs)
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	mfspr	r13,SPRN_SPRG3

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_8GPRS(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)
	mfmsr	r11
	std	r11,_MSR(r1)

	/* Get the PROM entrypoint */
	ld	r0,GPR4(r1)
	mtlr	r0

	/* Switch MSR to 32 bits mode
	 */
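	/* Both MSR_SF (our addressing mode) and MSR_ISF (assumed here to
	 * be the mode used when an interrupt is taken) are cleared, so
	 * everything runs 32-bit while firmware has control. */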
	mfmsr	r11
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
	andc	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
	andc	r11,r11,r12
	mtmsrd	r11
	isync

	/* Restore arguments & enter PROM here... */
	ld	r3,GPR3(r1)
	blrl

	/* Just make sure that r1's top 32 bits didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	mtmsrd	r0
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10

	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr