/*
 * arch/powerpc/kernel/entry_64.S (from linux-2.6-block.git)
 */
/*
 *  arch/ppc64/kernel/entry.S
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>

/* On iSeries, interrupt enablement is tracked in software (PACA) rather
 * than purely via MSR_EE, so the entry/exit paths need extra handling. */
#ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE
#endif
37
/*
 * System calls.
 */
        .section ".toc","aw"
.SYS_CALL_TABLE:
        .tc .sys_call_table[TC],.sys_call_table

/* This value is used to mark exception frames on the stack: it is the
 * ASCII string "regshere", stored just below STACK_FRAME_OVERHEAD. */
exception_marker:
        .tc ID_72656773_68657265[TC],0x7265677368657265

        .section ".text"
        .align 7

#undef SHOW_SYSCALLS
/*
 * System call entry.  On entry (set up by the exception prolog):
 *   r0  = syscall number, r3-r8 = arguments,
 *   r9  = caller's r13, r11 = SRR0 (NIP), r12 = SRR1 (MSR),
 *   r13 = PACA.  Builds an INT_FRAME and dispatches via the table.
 */
        .globl  system_call_common
system_call_common:
        andi.   r10,r12,MSR_PR          /* coming from user mode? */
        mr      r10,r1
        addi    r1,r1,-INT_FRAME_SIZE
        beq-    1f
        ld      r1,PACAKSAVE(r13)       /* from user: switch to kernel stack */
1:      std     r10,0(r1)               /* back-chain to previous frame */
        std     r11,_NIP(r1)
        std     r12,_MSR(r1)
        std     r0,GPR0(r1)
        std     r10,GPR1(r1)
        std     r2,GPR2(r1)
        std     r3,GPR3(r1)
        std     r4,GPR4(r1)
        std     r5,GPR5(r1)
        std     r6,GPR6(r1)
        std     r7,GPR7(r1)
        std     r8,GPR8(r1)
        li      r11,0
        std     r11,GPR9(r1)            /* r9-r12 were clobbered by the */
        std     r11,GPR10(r1)           /* exception prolog; store zeroes */
        std     r11,GPR11(r1)
        std     r11,GPR12(r1)
        std     r9,GPR13(r1)            /* caller's r13, saved by the prolog */
        crclr   so                      /* assume success: clear CR0.SO */
        mfcr    r9
        mflr    r10
        li      r11,0xc01               /* trap value marking a syscall frame */
        std     r9,_CCR(r1)
        std     r10,_LINK(r1)
        std     r11,_TRAP(r1)
        mfxer   r9
        mfctr   r10
        std     r9,_XER(r1)
        std     r10,_CTR(r1)
        std     r3,ORIG_GPR3(r1)        /* keep arg0 for syscall restart */
        ld      r2,PACATOC(r13)         /* kernel TOC pointer */
        addi    r9,r1,STACK_FRAME_OVERHEAD
        ld      r11,exception_marker@toc(r2)
        std     r11,-16(r9)             /* "regshere" marker */
#ifdef CONFIG_PPC_ISERIES
        /* Hack for handling interrupts when soft-enabling on iSeries */
        cmpdi   cr1,r0,0x5555           /* syscall 0x5555 */
        andi.   r10,r12,MSR_PR          /* from kernel */
        crand   4*cr0+eq,4*cr1+eq,4*cr0+eq
        beq     hardware_interrupt_entry
        lbz     r10,PACAPROCENABLED(r13)
        std     r10,SOFTE(r1)
#endif
        mfmsr   r11
        ori     r11,r11,MSR_EE          /* hard-enable interrupts */
        mtmsrd  r11,1

#ifdef SHOW_SYSCALLS
        bl      .do_show_syscall
        REST_GPR(0,r1)
        REST_4GPRS(3,r1)
        REST_2GPRS(7,r1)
        addi    r9,r1,STACK_FRAME_OVERHEAD
#endif
        clrrdi  r11,r1,THREAD_SHIFT     /* r11 = current_thread_info() */
        ld      r10,TI_FLAGS(r11)
        andi.   r11,r10,_TIF_SYSCALL_T_OR_A
        bne-    syscall_dotrace
syscall_dotrace_cont:
        cmpldi  0,r0,NR_syscalls
        bge-    syscall_enosys

system_call:            /* label this so stack traces look sane */
/*
 * Need to vector to 32 Bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
        ld      r11,.SYS_CALL_TABLE@toc(2)
        andi.   r10,r10,_TIF_32BIT
        beq     15f
        addi    r11,r11,8       /* use 32-bit syscall entries */
        clrldi  r3,r3,32        /* zero-extend the six arguments */
        clrldi  r4,r4,32
        clrldi  r5,r5,32
        clrldi  r6,r6,32
        clrldi  r7,r7,32
        clrldi  r8,r8,32
15:
        slwi    r0,r0,4         /* each table slot is 16 bytes (64+32 bit) */
        ldx     r10,r11,r0      /* Fetch system call handler [ptr] */
        mtctr   r10
        bctrl                   /* Call handler */
/*
 * Common syscall return.  r3 = handler return value.  Fast path:
 * no work flags set, translate -errno into positive errno + CR0.SO,
 * restore user state and rfid back.
 */
syscall_exit:
        std     r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
        bl      .do_show_syscall_exit
        ld      r3,RESULT(r1)
#endif
        clrrdi  r12,r1,THREAD_SHIFT     /* r12 = current_thread_info() */

        /* disable interrupts so current_thread_info()->flags can't change,
           and so that we don't get interrupted after loading SRR0/1. */
        ld      r8,_MSR(r1)
        andi.   r10,r8,MSR_RI
        beq-    unrecov_restore
        mfmsr   r10
        rldicl  r10,r10,48,1            /* clear MSR_EE */
        rotldi  r10,r10,16
        mtmsrd  r10,1
        ld      r9,TI_FLAGS(r12)
        li      r11,-_LAST_ERRNO
        andi.   r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL|_TIF_SAVE_NVGPRS|_TIF_NOERROR|_TIF_RESTORE_SIGMASK)
        bne-    syscall_exit_work
        cmpld   r3,r11                  /* r3 in [-_LAST_ERRNO, -1]? */
        ld      r5,_CCR(r1)
        bge-    syscall_error
syscall_error_cont:
        ld      r7,_NIP(r1)
        stdcx.  r0,0,r1                 /* to clear the reservation */
        andi.   r6,r8,MSR_PR
        ld      r4,_LINK(r1)
        beq-    1f                      /* only restore r13 if */
        ld      r13,GPR13(r1)           /* returning to usermode */
1:      ld      r2,GPR2(r1)
        li      r12,MSR_RI
        andc    r11,r10,r12
        mtmsrd  r11,1                   /* clear MSR.RI: SRR0/1 now live */
        ld      r1,GPR1(r1)
        mtlr    r4
        mtcr    r5
        mtspr   SPRN_SRR0,r7
        mtspr   SPRN_SRR1,r8
        rfid
        b       .       /* prevent speculative execution */
/* Error return: handler gave us -errno; hand userspace a positive
 * errno with the summary-overflow (SO) bit set in CR0. */
syscall_error:
        oris    r5,r5,0x1000    /* Set SO bit in CR */
        neg     r3,r3           /* r3 = +errno */
        std     r5,_CCR(r1)
        b       syscall_error_cont
/* Traced system call support */
syscall_dotrace:
        bl      .save_nvgprs
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      .do_syscall_trace_enter
        ld      r0,GPR0(r1)     /* Restore original registers */
        ld      r3,GPR3(r1)
        ld      r4,GPR4(r1)
        ld      r5,GPR5(r1)
        ld      r6,GPR6(r1)
        ld      r7,GPR7(r1)
        ld      r8,GPR8(r1)
        addi    r9,r1,STACK_FRAME_OVERHEAD
        clrrdi  r10,r1,THREAD_SHIFT
        ld      r10,TI_FLAGS(r10)       /* reload flags: tracer may change them */
        b       syscall_dotrace_cont
/* Syscall number out of range: fail with ENOSYS. */
syscall_enosys:
        li      r3,-ENOSYS
        b       syscall_exit
/*
 * Slow syscall-exit path: one or more TIF work flags was set.
 * On entry r3 = return value, r9 = TI_FLAGS, r11 = -_LAST_ERRNO,
 * r12 = thread_info, interrupts disabled.
 */
syscall_exit_work:
        /* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
           If TIF_NOERROR is set, just save r3 as it is. */

        andi.   r0,r9,_TIF_RESTOREALL
        bne-    2f
        cmpld   r3,r11          /* r11 is -LAST_ERRNO */
        blt+    1f
        andi.   r0,r9,_TIF_NOERROR
        bne-    1f
        ld      r5,_CCR(r1)
        neg     r3,r3
        oris    r5,r5,0x1000    /* Set SO bit in CR */
        std     r5,_CCR(r1)
1:      std     r3,GPR3(r1)
2:      andi.   r0,r9,(_TIF_PERSYSCALL_MASK)
        beq     4f

        /* Clear per-syscall TIF flags if any are set, but _leave_
           _TIF_SAVE_NVGPRS set in r9 since we haven't dealt with that
           yet. */

        li      r11,_TIF_PERSYSCALL_MASK
        addi    r12,r12,TI_FLAGS
3:      ldarx   r10,0,r12       /* atomically clear the per-syscall bits */
        andc    r10,r10,r11
        stdcx.  r10,0,r12
        bne-    3b
        subi    r12,r12,TI_FLAGS

4:      bl      .save_nvgprs
        /* Anything else left to do? */
        andi.   r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_SAVE_NVGPRS)
        beq     .ret_from_except_lite

        /* Re-enable interrupts */
        mfmsr   r10
        ori     r10,r10,MSR_EE
        mtmsrd  r10,1

        andi.   r0,r9,_TIF_SAVE_NVGPRS
        bne     save_user_nvgprs

        /* If tracing, re-enable interrupts and do it */
save_user_nvgprs_cont:
        andi.   r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
        beq     5f

        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      .do_syscall_trace_leave
        REST_NVGPRS(r1)
        clrrdi  r12,r1,THREAD_SHIFT

        /* Disable interrupts again and handle other work if any */
5:      mfmsr   r10
        rldicl  r10,r10,48,1    /* clear MSR_EE */
        rotldi  r10,r10,16
        mtmsrd  r10,1

        b       .ret_from_except_lite
/* Save non-volatile GPRs, if not already saved.  The low bit of the
 * frame's _TRAP word is set while the NVGPRs are still unsaved; clear
 * it once they have been written so a second call is a no-op. */
_GLOBAL(save_nvgprs)
        ld      r11,_TRAP(r1)
        andi.   r0,r11,1
        beqlr-                  /* bit clear: already saved, return */
        SAVE_NVGPRS(r1)
        clrrdi  r0,r11,1        /* clear the "unsaved" bit */
        std     r0,_TRAP(r1)
        blr
/* Copy r14-r31 out to the userspace buffer at TI_SIGFRAME, using the
 * 32-bit or 64-bit layout depending on _TIF_32BIT.  Faults on the user
 * store are caught via __ex_table and delivered as SIGSEGV. */
save_user_nvgprs:
        ld      r10,TI_SIGFRAME(r12)    /* user destination buffer */
        andi.   r0,r9,_TIF_32BIT
        beq-    save_user_nvgprs_64

        /* 32-bit save to userspace */

.macro savewords start, end
  1:    stw \start,4*(\start)(r10)
        .section __ex_table,"a"
        .align  3
        .llong  1b,save_user_nvgprs_fault
        .previous
        .if \end - \start
        savewords "(\start+1)",\end
        .endif
.endm
        savewords 14,31
        b       save_user_nvgprs_cont

save_user_nvgprs_64:
        /* 64-bit save to userspace */

.macro savelongs start, end
  1:    std \start,8*(\start)(r10)
        .section __ex_table,"a"
        .align  3
        .llong  1b,save_user_nvgprs_fault
        .previous
        .if \end - \start
        savelongs "(\start+1)",\end
        .endif
.endm
        savelongs 14,31
        b       save_user_nvgprs_cont

save_user_nvgprs_fault:
        li      r3,11           /* SIGSEGV */
        ld      r4,TI_TASK(r12)
        bl      .force_sigsegv

        clrrdi  r12,r1,THREAD_SHIFT
        ld      r9,TI_FLAGS(r12)        /* reload after the C call */
        b       save_user_nvgprs_cont
/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace. Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code. Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
        bl      .save_nvgprs
        bl      .sys_fork
        b       syscall_exit

_GLOBAL(ppc_vfork)
        bl      .save_nvgprs
        bl      .sys_vfork
        b       syscall_exit

_GLOBAL(ppc_clone)
        bl      .save_nvgprs
        bl      .sys_clone
        b       syscall_exit
/* First return of a newly created task: let the scheduler finish its
 * bookkeeping, then return 0 to the child via the syscall exit path. */
_GLOBAL(ret_from_fork)
        bl      .schedule_tail
        REST_NVGPRS(r1)
        li      r3,0            /* child's fork/clone return value */
        b       syscall_exit
/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc64/kernel/process.c
 */
        .align  7
_GLOBAL(_switch)
        mflr    r0
        std     r0,16(r1)
        stdu    r1,-SWITCH_FRAME_SIZE(r1)
        /* r3-r13 are caller saved -- Cort */
        SAVE_8GPRS(14, r1)
        SAVE_10GPRS(22, r1)
        mflr    r20             /* Return to switch caller */
        mfmsr   r22
        li      r0, MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
        oris    r0,r0,MSR_VEC@h /* Disable altivec */
        mfspr   r24,SPRN_VRSAVE /* save vrsave register value */
        std     r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
        and.    r0,r0,r22       /* FP (or VEC) currently enabled? */
        beq+    1f
        andc    r22,r22,r0      /* yes: turn it off across the switch */
        mtmsrd  r22
        isync
1:      std     r20,_NIP(r1)
        mfcr    r23
        std     r23,_CCR(r1)
        std     r1,KSP(r3)      /* Set old stack pointer */

#ifdef CONFIG_SMP
        /* We need a sync somewhere here to make sure that if the
         * previous task gets rescheduled on another CPU, it sees all
         * stores it has performed on this one.
         */
        sync
#endif /* CONFIG_SMP */

        addi    r6,r4,-THREAD   /* Convert THREAD to 'current' */
        std     r6,PACACURRENT(r13)     /* Set new 'current' */

        ld      r8,KSP(r4)      /* new stack pointer */
BEGIN_FTR_SECTION
        clrrdi  r6,r8,28        /* get its ESID */
        clrrdi  r9,r1,28        /* get current sp ESID */
        clrldi. r0,r6,2         /* is new ESID c00000000? */
        cmpd    cr1,r6,r9       /* or is new ESID the same as current ESID? */
        cror    eq,4*cr1+eq,eq
        beq     2f              /* if yes, don't slbie it */

        /* Bolt in the new stack SLB entry */
        ld      r7,KSP_VSID(r4) /* Get new stack's VSID */
        oris    r0,r6,(SLB_ESID_V)@h
        ori     r0,r0,(SLB_NUM_BOLTED-1)@l
        slbie   r6
        slbie   r6              /* Workaround POWER5 < DD2.1 issue */
        slbmte  r7,r0
        isync

2:
END_FTR_SECTION_IFSET(CPU_FTR_SLB)
        clrrdi  r7,r8,THREAD_SHIFT      /* base of new stack */
        /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
           because we don't need to leave the 288-byte ABI gap at the
           top of the kernel stack. */
        addi    r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

        mr      r1,r8           /* start using new stack pointer */
        std     r7,PACAKSAVE(r13)

        ld      r6,_CCR(r1)
        mtcrf   0xFF,r6

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
        ld      r0,THREAD_VRSAVE(r4)
        mtspr   SPRN_VRSAVE,r0  /* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */

        /* r3-r13 are destroyed -- Cort */
        REST_8GPRS(14, r1)
        REST_10GPRS(22, r1)

        /* convert old thread to its task_struct for return value */
        addi    r3,r3,-THREAD
        ld      r7,_NIP(r1)     /* Return to _switch caller in new task */
        mtlr    r7
        addi    r1,r1,SWITCH_FRAME_SIZE
        blr
/*
 * Exception/interrupt return.  ret_from_except restores the NVGPRs
 * first if they were saved (low bit of _TRAP clear); the _lite entry
 * skips that.  Falls through to 'restore' when no work is pending.
 */
        .align  7
_GLOBAL(ret_from_except)
        ld      r11,_TRAP(r1)
        andi.   r0,r11,1
        bne     .ret_from_except_lite   /* NVGPRs were never saved */
        REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
        /*
         * Disable interrupts so that current_thread_info()->flags
         * can't change between when we test it and when we return
         * from the interrupt.
         */
        mfmsr   r10             /* Get current interrupt state */
        rldicl  r9,r10,48,1     /* clear MSR_EE */
        rotldi  r9,r9,16
        mtmsrd  r9,1            /* Update machine state */

#ifdef CONFIG_PREEMPT
        clrrdi  r9,r1,THREAD_SHIFT      /* current_thread_info() */
        li      r0,_TIF_NEED_RESCHED    /* bits to check */
        ld      r3,_MSR(r1)
        ld      r4,TI_FLAGS(r9)
        /* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
        rlwimi  r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
        and.    r0,r4,r0        /* check NEED_RESCHED and maybe SIGPENDING */
        bne     do_work

#else /* !CONFIG_PREEMPT */
        ld      r3,_MSR(r1)     /* Returning to user mode? */
        andi.   r3,r3,MSR_PR
        beq     restore         /* if not, just restore regs and return */

        /* Check current_thread_info()->flags */
        clrrdi  r9,r1,THREAD_SHIFT
        ld      r4,TI_FLAGS(r9)
        andi.   r0,r4,_TIF_USER_WORK_MASK
        bne     do_work
#endif

restore:
#ifdef CONFIG_PPC_ISERIES
        ld      r5,SOFTE(r1)
        cmpdi   0,r5,0
        beq     4f
        /* Check for pending interrupts (iSeries) */
        ld      r3,PACALPPACAPTR(r13)
        ld      r3,LPPACAANYINT(r3)
        cmpdi   r3,0
        beq+    4f              /* skip do_IRQ if no interrupts */

        li      r3,0
        stb     r3,PACAPROCENABLED(r13) /* ensure we are soft-disabled */
        ori     r10,r10,MSR_EE
        mtmsrd  r10             /* hard-enable again */
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      .do_IRQ
        b       .ret_from_except_lite   /* loop back and handle more */

4:      stb     r5,PACAPROCENABLED(r13)
#endif

        ld      r3,_MSR(r1)
        andi.   r0,r3,MSR_RI
        beq-    unrecov_restore

        andi.   r0,r3,MSR_PR

        /*
         * r13 is our per cpu area, only restore it if we are returning to
         * userspace
         */
        beq     1f
        REST_GPR(13, r1)
1:
        ld      r3,_CTR(r1)
        ld      r0,_LINK(r1)
        mtctr   r3
        mtlr    r0
        ld      r3,_XER(r1)
        mtspr   SPRN_XER,r3

        REST_8GPRS(5, r1)

        stdcx.  r0,0,r1         /* to clear the reservation */

        mfmsr   r0
        li      r2, MSR_RI
        andc    r0,r0,r2
        mtmsrd  r0,1            /* clear RI: SRR0/1 are about to be live */

        ld      r0,_MSR(r1)
        mtspr   SPRN_SRR1,r0

        ld      r2,_CCR(r1)
        mtcrf   0xFF,r2
        ld      r2,_NIP(r1)
        mtspr   SPRN_SRR0,r2

        ld      r0,GPR0(r1)
        ld      r2,GPR2(r1)
        ld      r3,GPR3(r1)
        ld      r4,GPR4(r1)
        ld      r1,GPR1(r1)

        rfid
        b       .       /* prevent speculative execution */
/* Note: this must change if we start using the TIF_NOTIFY_RESUME bit */
do_work:
#ifdef CONFIG_PREEMPT
        andi.   r0,r3,MSR_PR    /* Returning to user mode? */
        bne     user_work
        /* Check that preempt_count() == 0 and interrupts are enabled */
        lwz     r8,TI_PREEMPT(r9)
        cmpwi   cr1,r8,0
#ifdef CONFIG_PPC_ISERIES
        ld      r0,SOFTE(r1)
        cmpdi   r0,0
#else
        andi.   r0,r3,MSR_EE
#endif
        crandc  eq,cr1*4+eq,eq
        bne     restore
        /* here we are preempting the current task */
1:
#ifdef CONFIG_PPC_ISERIES
        li      r0,1
        stb     r0,PACAPROCENABLED(r13)
#endif
        ori     r10,r10,MSR_EE
        mtmsrd  r10,1           /* reenable interrupts */
        bl      .preempt_schedule
        mfmsr   r10
        clrrdi  r9,r1,THREAD_SHIFT
        rldicl  r10,r10,48,1    /* disable interrupts again */
        rotldi  r10,r10,16
        mtmsrd  r10,1
        ld      r4,TI_FLAGS(r9)
        andi.   r0,r4,_TIF_NEED_RESCHED
        bne     1b              /* reschedule again if still needed */
        b       restore

user_work:
#endif
        /* Enable interrupts */
        ori     r10,r10,MSR_EE
        mtmsrd  r10,1

        andi.   r0,r4,_TIF_NEED_RESCHED
        beq     1f
        bl      .schedule
        b       .ret_from_except_lite

1:      bl      .save_nvgprs
        li      r3,0
        addi    r4,r1,STACK_FRAME_OVERHEAD
        bl      .do_signal
        b       .ret_from_except
/* MSR_RI was clear on an exception frame: state is unrecoverable. */
unrecov_restore:
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      .unrecoverable_exception
        b       unrecov_restore         /* never returns; loop for safety */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
        mflr    r0
        std     r0,16(r1)
        stdu    r1,-RTAS_FRAME_SIZE(r1) /* Save SP and create stack space. */

        /* Because RTAS is running in 32b mode, it clobbers the high order half
         * of all registers that it saves.  We therefore save those registers
         * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
         */
        SAVE_GPR(2, r1)                 /* Save the TOC */
        SAVE_GPR(13, r1)                /* Save paca */
        SAVE_8GPRS(14, r1)              /* Save the non-volatiles */
        SAVE_10GPRS(22, r1)             /* ditto */

        mfcr    r4
        std     r4,_CCR(r1)
        mfctr   r5
        std     r5,_CTR(r1)
        mfspr   r6,SPRN_XER
        std     r6,_XER(r1)
        mfdar   r7
        std     r7,_DAR(r1)
        mfdsisr r8
        std     r8,_DSISR(r1)
        mfsrr0  r9
        std     r9,_SRR0(r1)
        mfsrr1  r10
        std     r10,_SRR1(r1)

        /* There is no way it is acceptable to get here with interrupts enabled,
         * check it with the asm equivalent of WARN_ON
         */
        mfmsr   r6
        andi.   r0,r6,MSR_EE
1:      tdnei   r0,0
.section __bug_table,"a"
        .llong  1b,__LINE__ + 0x1000000, 1f, 2f
.previous
.section .rodata,"a"
1:      .asciz  __FILE__
2:      .asciz  "enter_rtas"
.previous

        /* Unfortunately, the stack pointer and the MSR are also clobbered,
         * so they are saved in the PACA which allows us to restore
         * our original state after RTAS returns.
         */
        std     r1,PACAR1(r13)
        std     r6,PACASAVEDMSR(r13)

        /* Setup our real return addr */
        LOAD_REG_ADDR(r4,.rtas_return_loc)
        clrldi  r4,r4,2                 /* convert to realmode address */
        mtlr    r4

        li      r0,0
        ori     r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
        andc    r0,r6,r0                /* r0 = current MSR minus EE/SE/BE/RI */

        li      r9,1
        rldicr  r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
        ori     r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP
        andc    r6,r0,r9                /* RTAS MSR: 32-bit, MMU off, no FP */
        ori     r6,r6,MSR_RI
        sync                            /* disable interrupts so SRR0/1 */
        mtmsrd  r0                      /* don't get trashed */

        LOAD_REG_ADDR(r4, rtas)
        ld      r5,RTASENTRY(r4)        /* get the rtas->entry value */
        ld      r4,RTASBASE(r4)         /* get the rtas->base value */

        mtspr   SPRN_SRR0,r5
        mtspr   SPRN_SRR1,r6
        rfid
        b       .       /* prevent speculative execution */
/* RTAS returns here, still in real mode; recover SP/MSR from the PACA
 * and rfid to rtas_restore_regs with relocation back on. */
_STATIC(rtas_return_loc)
        /* relocation is off at this point */
        mfspr   r4,SPRN_SPRG3   /* Get PACA */
        clrldi  r4,r4,2         /* convert to realmode address */

        mfmsr   r6
        li      r0,MSR_RI
        andc    r6,r6,r0        /* clear RI while SRR0/1 are in use */
        sync
        mtmsrd  r6

        ld      r1,PACAR1(r4)   /* Restore our SP */
        LOAD_REG_IMMEDIATE(r3,.rtas_restore_regs)
        ld      r4,PACASAVEDMSR(r4)     /* Restore our MSR */

        mtspr   SPRN_SRR0,r3
        mtspr   SPRN_SRR1,r4
        rfid
        b       .       /* prevent speculative execution */
/* Back in virtual mode: restore everything enter_rtas saved and return. */
_STATIC(rtas_restore_regs)
        /* relocation is on at this point */
        REST_GPR(2, r1)                 /* Restore the TOC */
        REST_GPR(13, r1)                /* Restore paca */
        REST_8GPRS(14, r1)              /* Restore the non-volatiles */
        REST_10GPRS(22, r1)             /* ditto */

        mfspr   r13,SPRN_SPRG3

        ld      r4,_CCR(r1)
        mtcr    r4
        ld      r5,_CTR(r1)
        mtctr   r5
        ld      r6,_XER(r1)
        mtspr   SPRN_XER,r6
        ld      r7,_DAR(r1)
        mtdar   r7
        ld      r8,_DSISR(r1)
        mtdsisr r8
        ld      r9,_SRR0(r1)
        mtsrr0  r9
        ld      r10,_SRR1(r1)
        mtsrr1  r10

        addi    r1,r1,RTAS_FRAME_SIZE   /* Unstack our frame */
        ld      r0,16(r1)               /* get return address */

        mtlr    r0
        blr                             /* return to caller */

#endif /* CONFIG_PPC_RTAS */
#ifdef CONFIG_PPC_MULTIPLATFORM

/* Call into Open Firmware (PROM).  r3 = argument block, the PROM entry
 * point is taken from the saved GPR4 slot.  PROM runs 32-bit, so all
 * state it might clobber is saved around the call. */
_GLOBAL(enter_prom)
        mflr    r0
        std     r0,16(r1)
        stdu    r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */

        /* Because PROM is running in 32b mode, it clobbers the high order half
         * of all registers that it saves.  We therefore save those registers
         * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
         */
        SAVE_8GPRS(2, r1)
        SAVE_GPR(13, r1)
        SAVE_8GPRS(14, r1)
        SAVE_10GPRS(22, r1)
        mfcr    r4
        std     r4,_CCR(r1)
        mfctr   r5
        std     r5,_CTR(r1)
        mfspr   r6,SPRN_XER
        std     r6,_XER(r1)
        mfdar   r7
        std     r7,_DAR(r1)
        mfdsisr r8
        std     r8,_DSISR(r1)
        mfsrr0  r9
        std     r9,_SRR0(r1)
        mfsrr1  r10
        std     r10,_SRR1(r1)
        mfmsr   r11
        std     r11,_MSR(r1)

        /* Get the PROM entrypoint */
        ld      r0,GPR4(r1)
        mtlr    r0

        /* Switch MSR to 32 bits mode
         */
        mfmsr   r11
        li      r12,1
        rldicr  r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
        andc    r11,r11,r12             /* clear MSR_SF */
        li      r12,1
        rldicr  r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
        andc    r11,r11,r12             /* clear MSR_ISF */
        mtmsrd  r11
        isync

        /* Restore arguments & enter PROM here... */
        ld      r3,GPR3(r1)
        blrl

        /* Just make sure that r1 top 32 bits didn't get
         * corrupt by OF
         */
        rldicl  r1,r1,0,32

        /* Restore the MSR (back to 64 bits) */
        ld      r0,_MSR(r1)
        mtmsrd  r0
        isync

        /* Restore other registers */
        REST_GPR(2, r1)
        REST_GPR(13, r1)
        REST_8GPRS(14, r1)
        REST_10GPRS(22, r1)
        ld      r4,_CCR(r1)
        mtcr    r4
        ld      r5,_CTR(r1)
        mtctr   r5
        ld      r6,_XER(r1)
        mtspr   SPRN_XER,r6
        ld      r7,_DAR(r1)
        mtdar   r7
        ld      r8,_DSISR(r1)
        mtdsisr r8
        ld      r9,_SRR0(r1)
        mtsrr0  r9
        ld      r10,_SRR1(r1)
        mtsrr1  r10

        addi    r1,r1,PROM_FRAME_SIZE
        ld      r0,16(r1)
        mtlr    r0
        blr

#endif /* CONFIG_PPC_MULTIPLATFORM */