/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>

/*
 * System calls.
 */
	.section	".toc","aw"
.SYS_CALL_TABLE:
	.tc .sys_call_table[TC],.sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_72656773_68657265[TC],0x7265677368657265
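	/* 0x7265677368657265 is "regshere" in ASCII */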

	.section	".text"
	.align 7

#undef SHOW_SYSCALLS

	.globl system_call_common
system_call_common:
	andi.	r10,r12,MSR_PR		/* r12 = SRR1: came from user mode? */
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f			/* from kernel: frame on current stack */
	ld	r1,PACAKSAVE(r13)	/* from user: switch to kernel stack */
1:	std	r10,0(r1)
	crclr	so			/* assume no syscall error */
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	ACCOUNT_CPU_USER_ENTRY(r10, r11)
	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	mfcr	r9
	mflr	r10
	li	r11,0xc01	/* trap 0xc00 (syscall); low bit set:
				   non-volatile GPRs not saved */
	std	r9,_CCR(r1)
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	mfxer	r9
	mfctr	r10
	std	r9,_XER(r1)
	std	r10,_CTR(r1)
	std	r3,ORIG_GPR3(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)	/* "regshere" marker */
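	/* Mark interrupts as enabled for the lazy-disable scheme: both
	 * the soft and hard enable flags in the PACA, plus the frame. */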
	li	r10,1
	stb	r10,PACASOFTIRQEN(r13)
	stb	r10,PACAHARDIRQEN(r13)
	std	r10,SOFTE(r1)
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	/* Hack for handling interrupts when soft-enabling on iSeries */
	cmpdi	cr1,r0,0x5555		/* syscall 0x5555 */
	andi.	r10,r12,MSR_PR		/* from kernel */
	crand	4*cr0+eq,4*cr1+eq,4*cr0+eq
	bne	2f
	b	hardware_interrupt_entry
2:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
	mfmsr	r11
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1			/* hard-enable interrupts */

#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
#endif
	clrrdi	r11,r1,THREAD_SHIFT	/* current_thread_info() */
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys

system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to 32 Bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
	ld	r11,.SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
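	/* Each table slot is 16 bytes: the 64-bit handler at offset 0
	 * and the 32-bit handler at offset 8 (hence the slwi by 4 below). */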
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
	slwi	r0,r0,4
	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
	mtctr	r10
	bctrl			/* Call handler */

syscall_exit:
	std	r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall_exit
	ld	r3,RESULT(r1)
#endif
	clrrdi	r12,r1,THREAD_SHIFT

	/* disable interrupts so current_thread_info()->flags can't change,
	   and so that we don't get interrupted after loading SRR0/1. */
	ld	r8,_MSR(r1)
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
	mfmsr	r10
	rldicl	r10,r10,48,1	/* clear MSR_EE */
	rotldi	r10,r10,16
	mtmsrd	r10,1
	ld	r9,TI_FLAGS(r12)
	li	r11,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	syscall_exit_work
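	/* Return values in [-_LAST_ERRNO, -1] (unsigned >= -_LAST_ERRNO)
	 * are errors: negate r3 and set CR0.SO for userspace. */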
	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	syscall_error
syscall_error_cont:
	ld	r7,_NIP(r1)
	stdcx.	r0,0,r1			/* to clear the reservation */
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)
	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r11, r12)
	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
1:	ld	r2,GPR2(r1)
	li	r12,MSR_RI
	andc	r11,r10,r12
	mtmsrd	r11,1		/* clear MSR.RI */
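	/* RI is now clear while SRR0/SRR1 hold live state, so any
	 * exception arriving before the rfid is flagged unrecoverable
	 * instead of silently clobbering the return context. */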
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	rfid
	b	.	/* prevent speculative execution */

syscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3
	std	r5,_CCR(r1)
	b	syscall_error_cont

/* Traced system call support */
syscall_dotrace:
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_enter
	ld	r0,GPR0(r1)	/* Restore original registers */
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	clrrdi	r10,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r10)
	b	syscall_dotrace_cont

syscall_enosys:
	li	r3,-ENOSYS
	b	syscall_exit

syscall_exit_work:
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	 If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 is -_LAST_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */
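	/* ldarx/stdcx. retry loop: TI_FLAGS must be updated atomically,
	 * since other code may set flags concurrently. */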
	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything else left to do? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	.ret_from_except_lite

	/* Re-enable interrupts */
	mfmsr	r10
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1

	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_leave
	b	.ret_from_except

/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1	/* low bit set: nvgprs not yet saved */
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1	/* clear the bit to record that they are */
	std	r0,_TRAP(r1)
	blr

/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	.save_nvgprs
	bl	.sys_fork
	b	syscall_exit

_GLOBAL(ppc_vfork)
	bl	.save_nvgprs
	bl	.sys_vfork
	b	syscall_exit

_GLOBAL(ppc_clone)
	bl	.save_nvgprs
	bl	.sys_clone
	b	syscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	.save_nvgprs
	bl	.compat_sys_swapcontext
	b	syscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	.save_nvgprs
	bl	.sys_swapcontext
	b	syscall_exit

_GLOBAL(ret_from_fork)
	bl	.schedule_tail
	REST_NVGPRS(r1)
	li	r3,0		/* the child returns 0 from fork() */
	b	syscall_exit

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20		/* Return to switch caller */
	mfmsr	r22
	li	r0, MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
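	/* If FP (and Altivec) are live in the MSR, turn them off now;
	 * first use in the new context then traps, so the FP/VMX state
	 * can be saved and restored lazily. */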
	and.	r0,r0,r22
	beq+	1f
	andc	r22,r22,r0
	mtmsrd	r22
	isync
1:	std	r20,_NIP(r1)
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
BEGIN_FTR_SECTION
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l

	/* Update the last bolted SLB */
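	/* Clear the shadow ESID first and write it back last, so the
	 * hypervisor never sees a valid entry paired with a stale VSID. */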
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
	std	r7,SLBSHADOW_STACKVSID(r9)  /* Save VSID */
	std	r0,SLBSHADOW_STACKESID(r9)  /* Save ESID */

	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync

2:
END_FTR_SECTION_IFSET(CPU_FTR_SLB)
	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	.ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
	mfmsr	r10		/* Get current interrupt state */
	rldicl	r9,r10,48,1	/* clear MSR_EE */
	rotldi	r9,r9,16
	mtmsrd	r9,1		/* Update machine state */

#ifdef CONFIG_PREEMPT
	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
	li	r0,_TIF_NEED_RESCHED	/* bits to check */
	ld	r3,_MSR(r1)
	ld	r4,TI_FLAGS(r9)
	/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
	rlwimi	r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
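	/* ...so _TIF_SIGPENDING is tested only when returning to user */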
	and.	r0,r4,r0	/* check NEED_RESCHED and maybe SIGPENDING */
	bne	do_work

#else /* !CONFIG_PREEMPT */
	ld	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r3,r3,MSR_PR
	beq	restore		/* if not, just restore regs and return */

	/* Check current_thread_info()->flags */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	do_work
#endif

restore:
	ld	r5,SOFTE(r1)
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
	cmpdi	0,r5,0
	beq	4f
	/* Check for pending interrupts (iSeries) */
	ld	r3,PACALPPACAPTR(r13)
	ld	r3,LPPACAANYINT(r3)
	cmpdi	r3,0
	beq+	4f	/* skip do_IRQ if no interrupts */

	li	r3,0
	stb	r3,PACASOFTIRQEN(r13)	/* ensure we are soft-disabled */
	ori	r10,r10,MSR_EE
	mtmsrd	r10			/* hard-enable again */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite		/* loop back and handle more */
4:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif
	stb	r5,PACASOFTIRQEN(r13)

	ld	r3,_MSR(r1)
	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore

	/* extract EE bit and use it to restore paca->hard_enabled */
	rldicl	r4,r3,49,63		/* r4 = (r3 >> 15) & 1 */
	stb	r4,PACAHARDIRQEN(r13)

	andi.	r0,r3,MSR_PR

	/*
	 * r13 is our per cpu area, only restore it if we are returning to
	 * userspace
	 */
	beq	1f
	ACCOUNT_CPU_USER_EXIT(r3, r4)
	REST_GPR(13, r1)
1:
	ld	r3,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r3
	mtlr	r0
	ld	r3,_XER(r1)
	mtspr	SPRN_XER,r3

	REST_8GPRS(5, r1)

	stdcx.	r0,0,r1		/* to clear the reservation */
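	/* a reservation left over from the interrupted code must not
	 * survive the return, or a later stcx. could succeed spuriously */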

	mfmsr	r0
	li	r2, MSR_RI
	andc	r0,r0,r2
	mtmsrd	r0,1		/* clear MSR.RI */

	ld	r0,_MSR(r1)
	mtspr	SPRN_SRR1,r0

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid
	b	.	/* prevent speculative execution */

/* Note: this must change if we start using the TIF_NOTIFY_RESUME bit */
do_work:
#ifdef CONFIG_PREEMPT
	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
	bne	user_work
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
	crandc	eq,cr1*4+eq,eq
	bne	restore
	/* here we are preempting the current task */
1:
	li	r0,1
	stb	r0,PACASOFTIRQEN(r13)
	stb	r0,PACAHARDIRQEN(r13)
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1		/* reenable interrupts */
	bl	.preempt_schedule
	mfmsr	r10
	clrrdi	r9,r1,THREAD_SHIFT
	rldicl	r10,r10,48,1	/* disable interrupts again */
	rotldi	r10,r10,16
	mtmsrd	r10,1
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b
	b	restore

user_work:
#endif
	/* Enable interrupts */
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1

	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	bl	.schedule
	b	.ret_from_except_lite

1:	bl	.save_nvgprs
	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	.do_signal
	b	.ret_from_except

unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	unrecov_restore

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACASOFTIRQEN(r13)
1:	tdnei	r0,0
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

	/* Hard-disable interrupts */
	mfmsr	r6
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,.rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4

	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP
	andc	r6,r0,r9
	ori	r6,r6,MSR_RI
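	/* r6 is the MSR RTAS will run with: MMU off (IR/DR clear),
	 * 32-bit mode (SF clear), FP disabled, RI set. */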
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_return_loc)
	/* relocation is off at this point */
	mfspr	r4,SPRN_SPRG3		/* Get PACA */
	clrldi	r4,r4,2			/* convert to realmode address */

	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync
	mtmsrd	r6

	ld	r1,PACAR1(r4)		/* Restore our SP */
	LOAD_REG_IMMEDIATE(r3,.rtas_restore_regs)
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_restore_regs)
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	mfspr	r13,SPRN_SPRG3

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_8GPRS(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)
	mfmsr	r11
	std	r11,_MSR(r1)

	/* Get the PROM entrypoint */
	ld	r0,GPR4(r1)
	mtlr	r0

	/* Switch the MSR to 32-bit mode: clear MSR_SF and MSR_ISF */
	mfmsr	r11
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
	andc	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
	andc	r11,r11,r12
	mtmsrd	r11
	isync

	/* Restore arguments & enter PROM here... */
	ld	r3,GPR3(r1)
	blrl

	/* Just make sure that r1's top 32 bits didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	mtmsrd	r0
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10

	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr