/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *   Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 *   Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>

#ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE
#endif

/*
 * System calls.
 */
	.section	".toc","aw"
.SYS_CALL_TABLE:
	.tc .sys_call_table[TC],.sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_72656773_68657265[TC],0x7265677368657265
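	/* The 64-bit constant above is the ASCII string "regshere"; it is the
	 * value stored as the "regshere" marker in each exception frame. */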

	.section	".text"
	.align 7

#undef SHOW_SYSCALLS

	.globl system_call_common
system_call_common:
	andi.	r10,r12,MSR_PR
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f
	ld	r1,PACAKSAVE(r13)
1:	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	ACCOUNT_CPU_USER_ENTRY(r10, r11)
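	/* ACCOUNT_CPU_USER_ENTRY (from asm/ppc_asm.h) charges the time just
	 * spent in user mode to the task; it expands to nothing when virtual
	 * CPU time accounting is not configured. */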
	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r9,GPR13(r1)
	crclr	so
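	/* clear CR0[SO], the ABI's syscall-error flag; the exit path sets it
	 * again if the handler returns an error. */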
	mfcr	r9
	mflr	r10
	li	r11,0xc01
	std	r9,_CCR(r1)
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
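	/* trap value 0xc01: the system call vector (0xc00) with the low bit
	 * set to record that the non-volatile GPRs are not saved in this
	 * frame (save_nvgprs clears that bit once it has saved them). */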
	mfxer	r9
	mfctr	r10
	std	r9,_XER(r1)
	std	r10,_CTR(r1)
	std	r3,ORIG_GPR3(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
#ifdef CONFIG_PPC_ISERIES
	/* Hack for handling interrupts when soft-enabling on iSeries */
	cmpdi	cr1,r0,0x5555		/* syscall 0x5555 */
	andi.	r10,r12,MSR_PR		/* from kernel */
	crand	4*cr0+eq,4*cr1+eq,4*cr0+eq
	beq	hardware_interrupt_entry
	lbz	r10,PACAPROCENABLED(r13)
	std	r10,SOFTE(r1)
#endif
	mfmsr	r11
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1

#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
#endif
	clrrdi	r11,r1,THREAD_SHIFT
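	/* current_thread_info(): the thread_info lives at the base of the
	 * kernel stack, so round r1 down to a THREAD_SIZE boundary. */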
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_T_OR_A
	bne-	syscall_dotrace
syscall_dotrace_cont:
	cmpldi	0,r0,NR_syscalls
	bge-	syscall_enosys

system_call:			/* label this so stack traces look sane */
/*
 * Need to vector to the 32-bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
	ld	r11,.SYS_CALL_TABLE@toc(2)
	andi.	r10,r10,_TIF_32BIT
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
	slwi	r0,r0,4
	ldx	r10,r11,r0	/* Fetch system call handler [ptr] */
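	/* Each sys_call_table slot is 16 bytes (the 64-bit handler pointer
	 * followed by the 32-bit one), hence the shift by 4 above and the
	 * +8 offset used for 32-bit tasks. */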
	mtctr	r10
	bctrl			/* Call handler */

syscall_exit:
	std	r3,RESULT(r1)
#ifdef SHOW_SYSCALLS
	bl	.do_show_syscall_exit
	ld	r3,RESULT(r1)
#endif
	clrrdi	r12,r1,THREAD_SHIFT

	/* disable interrupts so current_thread_info()->flags can't change,
	   and so that we don't get interrupted after loading SRR0/1. */
	ld	r8,_MSR(r1)
	andi.	r10,r8,MSR_RI
	beq-	unrecov_restore
	mfmsr	r10
	rldicl	r10,r10,48,1
	rotldi	r10,r10,16
	mtmsrd	r10,1
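	/* The rldicl/rotldi pair above rotates MSR_EE (bit 48) up to bit 0,
	 * masks it off and rotates back, hard-disabling external interrupts
	 * without disturbing any other MSR bits. */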
	ld	r9,TI_FLAGS(r12)
	li	r11,-_LAST_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL|_TIF_SAVE_NVGPRS|_TIF_NOERROR|_TIF_RESTORE_SIGMASK)
	bne-	syscall_exit_work
	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	syscall_error
syscall_error_cont:
	ld	r7,_NIP(r1)
	stdcx.	r0,0,r1			/* to clear the reservation */
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)
	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r11, r12)
	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
1:	ld	r2,GPR2(r1)
	li	r12,MSR_RI
	andc	r11,r10,r12
	mtmsrd	r11,1		/* clear MSR.RI */
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	rfid
	b	.	/* prevent speculative execution */

syscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3
	std	r5,_CCR(r1)
	b	syscall_error_cont

/* Traced system call support */
syscall_dotrace:
	bl	.save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_enter
	ld	r0,GPR0(r1)	/* Restore original registers */
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	clrrdi	r10,r1,THREAD_SHIFT
	ld	r10,TI_FLAGS(r10)
	b	syscall_dotrace_cont

syscall_enosys:
	li	r3,-ENOSYS
	b	syscall_exit

syscall_exit_work:
	/* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	   If TIF_NOERROR is set, just save r3 as it is. */

	andi.	r0,r9,_TIF_RESTOREALL
	bne-	2f
	cmpld	r3,r11		/* r11 holds -_LAST_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set, but _leave_
	   _TIF_SAVE_NVGPRS set in r9 since we haven't dealt with that
	   yet. */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS
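	/* The ldarx/stdcx. loop above clears the flags atomically, so flag
	 * bits being set concurrently elsewhere are not lost. */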

4:	bl	.save_nvgprs
	/* Anything else left to do? */
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_SAVE_NVGPRS)
	beq	.ret_from_except_lite

	/* Re-enable interrupts */
	mfmsr	r10
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1

	andi.	r0,r9,_TIF_SAVE_NVGPRS
	bne	save_user_nvgprs

	/* If tracing, re-enable interrupts and do it */
save_user_nvgprs_cont:
	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
	beq	5f

	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_syscall_trace_leave
	REST_NVGPRS(r1)
	clrrdi	r12,r1,THREAD_SHIFT

	/* Disable interrupts again and handle other work if any */
5:	mfmsr	r10
	rldicl	r10,r10,48,1
	rotldi	r10,r10,16
	mtmsrd	r10,1

	b	.ret_from_except_lite

/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr


save_user_nvgprs:
	ld	r10,TI_SIGFRAME(r12)
	andi.	r0,r9,_TIF_32BIT
	beq-	save_user_nvgprs_64

	/* 32-bit save to userspace */

	.macro savewords start, end
  1:	stw \start,4*(\start)(r10)
	.section __ex_table,"a"
	.align	3
	.llong	1b,save_user_nvgprs_fault
	.previous
	.if \end - \start
	savewords "(\start+1)",\end
	.endif
	.endm
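	/* The recursive macro above emits one stw for each GPR in the range,
	 * each paired with an __ex_table fixup entry so that a fault while
	 * storing to the user sigframe branches to save_user_nvgprs_fault. */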
	savewords 14,31
	b	save_user_nvgprs_cont

save_user_nvgprs_64:
	/* 64-bit save to userspace */

	.macro savelongs start, end
  1:	std \start,8*(\start)(r10)
	.section __ex_table,"a"
	.align	3
	.llong	1b,save_user_nvgprs_fault
	.previous
	.if \end - \start
	savelongs "(\start+1)",\end
	.endif
	.endm

	savelongs 14,31
	b	save_user_nvgprs_cont

save_user_nvgprs_fault:
	li	r3,11		/* SIGSEGV */
	ld	r4,TI_TASK(r12)
	bl	.force_sigsegv

	clrrdi	r12,r1,THREAD_SHIFT
	ld	r9,TI_FLAGS(r12)
	b	save_user_nvgprs_cont

/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace.  Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code.  Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	.save_nvgprs
	bl	.sys_fork
	b	syscall_exit

_GLOBAL(ppc_vfork)
	bl	.save_nvgprs
	bl	.sys_vfork
	b	syscall_exit

_GLOBAL(ppc_clone)
	bl	.save_nvgprs
	bl	.sys_clone
	b	syscall_exit

_GLOBAL(ret_from_fork)
	bl	.schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	syscall_exit

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20		/* Return to switch caller */
	mfmsr	r22
	li	r0, MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
	mfspr	r24,SPRN_VRSAVE	/* save vrsave register value */
	std	r24,THREAD_VRSAVE(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	and.	r0,r0,r22
	beq+	1f
	andc	r22,r22,r0
	mtmsrd	r22
	isync
1:	std	r20,_NIP(r1)
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

#ifdef CONFIG_SMP
	/* We need a sync somewhere here to make sure that if the
	 * previous task gets rescheduled on another CPU, it sees all
	 * stores it has performed on this one.
	 */
	sync
#endif /* CONFIG_SMP */

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */

	ld	r8,KSP(r4)	/* new stack pointer */
BEGIN_FTR_SECTION
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
	slbie	r6
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
	slbmte	r7,r0
	isync

2:
END_FTR_SECTION_IFSET(CPU_FTR_SLB)
	clrrdi	r7,r8,THREAD_SHIFT	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	ld	r0,THREAD_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	.ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
	mfmsr	r10		/* Get current interrupt state */
	rldicl	r9,r10,48,1	/* clear MSR_EE */
	rotldi	r9,r9,16
	mtmsrd	r9,1		/* Update machine state */

#ifdef CONFIG_PREEMPT
	clrrdi	r9,r1,THREAD_SHIFT	/* current_thread_info() */
	li	r0,_TIF_NEED_RESCHED	/* bits to check */
	ld	r3,_MSR(r1)
	ld	r4,TI_FLAGS(r9)
	/* Move MSR_PR bit in r3 to _TIF_SIGPENDING position in r0 */
	rlwimi	r0,r3,32+TIF_SIGPENDING-MSR_PR_LG,_TIF_SIGPENDING
	and.	r0,r4,r0	/* check NEED_RESCHED and maybe SIGPENDING */
	bne	do_work

#else /* !CONFIG_PREEMPT */
	ld	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r3,r3,MSR_PR
	beq	restore		/* if not, just restore regs and return */

	/* Check current_thread_info()->flags */
	clrrdi	r9,r1,THREAD_SHIFT
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	do_work
#endif

restore:
#ifdef CONFIG_PPC_ISERIES
	ld	r5,SOFTE(r1)
	cmpdi	0,r5,0
	beq	4f
	/* Check for pending interrupts (iSeries) */
	ld	r3,PACALPPACAPTR(r13)
	ld	r3,LPPACAANYINT(r3)
	cmpdi	r3,0
	beq+	4f		/* skip do_IRQ if no interrupts */

	li	r3,0
	stb	r3,PACAPROCENABLED(r13)	/* ensure we are soft-disabled */
	ori	r10,r10,MSR_EE
	mtmsrd	r10		/* hard-enable again */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.do_IRQ
	b	.ret_from_except_lite	/* loop back and handle more */

4:	stb	r5,PACAPROCENABLED(r13)
#endif

	ld	r3,_MSR(r1)
	andi.	r0,r3,MSR_RI
	beq-	unrecov_restore

	andi.	r0,r3,MSR_PR

	/*
	 * r13 is our per cpu area, only restore it if we are returning to
	 * userspace
	 */
	beq	1f
	ACCOUNT_CPU_USER_EXIT(r3, r4)
	REST_GPR(13, r1)
1:
	ld	r3,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r3
	mtlr	r0
	ld	r3,_XER(r1)
	mtspr	SPRN_XER,r3

	REST_8GPRS(5, r1)

	stdcx.	r0,0,r1		/* to clear the reservation */

	mfmsr	r0
	li	r2, MSR_RI
	andc	r0,r0,r2
	mtmsrd	r0,1
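	/* MSR_RI is now clear: from here until the rfid, SRR0/SRR1 hold the
	 * state we are returning to, so another exception in this window
	 * would be unrecoverable. */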

	ld	r0,_MSR(r1)
	mtspr	SPRN_SRR1,r0

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)

	rfid
	b	.	/* prevent speculative execution */

/* Note: this must change if we start using the TIF_NOTIFY_RESUME bit */
do_work:
#ifdef CONFIG_PREEMPT
	andi.	r0,r3,MSR_PR	/* Returning to user mode? */
	bne	user_work
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr1,r8,0
#ifdef CONFIG_PPC_ISERIES
	ld	r0,SOFTE(r1)
	cmpdi	r0,0
#else
	andi.	r0,r3,MSR_EE
#endif
	crandc	eq,cr1*4+eq,eq
	bne	restore
	/* here we are preempting the current task */
1:
#ifdef CONFIG_PPC_ISERIES
	li	r0,1
	stb	r0,PACAPROCENABLED(r13)
#endif
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1		/* reenable interrupts */
	bl	.preempt_schedule
	mfmsr	r10
	clrrdi	r9,r1,THREAD_SHIFT
	rldicl	r10,r10,48,1	/* disable interrupts again */
	rotldi	r10,r10,16
	mtmsrd	r10,1
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b
	b	restore

user_work:
#endif
	/* Enable interrupts */
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1

	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	1f
	bl	.schedule
	b	.ret_from_except_lite

1:	bl	.save_nvgprs
	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD
	bl	.do_signal
	b	.ret_from_except

unrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	.unrecoverable_exception
	b	unrecov_restore

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-RTAS_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)

	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	mfmsr	r6
	andi.	r0,r6,MSR_EE
1:	tdnei	r0,0
	.section __bug_table,"a"
	.llong	1b,__LINE__ + 0x1000000, 1f, 2f
	.previous
	.section .rodata,"a"
1:	.asciz	__FILE__
2:	.asciz "enter_rtas"
	.previous

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,.rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4

	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP
	andc	r6,r0,r9
	ori	r6,r6,MSR_RI
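	/* r6 is now the MSR that RTAS will run with: 32-bit mode (SF clear),
	 * MMU translation and FP off, external interrupts off, RI kept on. */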
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_return_loc)
	/* relocation is off at this point */
	mfspr	r4,SPRN_SPRG3		/* Get PACA */
	clrldi	r4,r4,2			/* convert to realmode address */

	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sync
	mtmsrd	r6

	ld	r1,PACAR1(r4)		/* Restore our SP */
	LOAD_REG_IMMEDIATE(r3,.rtas_restore_regs)
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */

_STATIC(rtas_restore_regs)
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	mfspr	r13,SPRN_SPRG3

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10

	addi	r1,r1,RTAS_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

#ifdef CONFIG_PPC_MULTIPLATFORM

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-PROM_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves.  We therefore save those registers
	 * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
	 */
	SAVE_8GPRS(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)
	mfsrr0	r9
	std	r9,_SRR0(r1)
	mfsrr1	r10
	std	r10,_SRR1(r1)
	mfmsr	r11
	std	r11,_MSR(r1)

	/* Get the PROM entrypoint */
	ld	r0,GPR4(r1)
	mtlr	r0

	/* Switch MSR to 32-bit mode */
	mfmsr	r11
	li	r12,1
	rldicr	r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
	andc	r11,r11,r12
	li	r12,1
	rldicr	r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
	andc	r11,r11,r12
	mtmsrd	r11
	isync

	/* Restore arguments & enter PROM here... */
	ld	r3,GPR3(r1)
	blrl

	/* Just make sure that r1's top 32 bits didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	mtmsrd	r0
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8
	ld	r9,_SRR0(r1)
	mtsrr0	r9
	ld	r10,_SRR1(r1)
	mtsrr1	r10

	addi	r1,r1,PROM_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr

#endif /* CONFIG_PPC_MULTIPLATFORM */