/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 * This file contains the system call entry code, context switch
 * code, and exception/interrupt return code for PowerPC.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/code-patching-asm.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/hw_irq.h>
#include <asm/context_tracking.h>
#include <asm/tm.h>
#include <asm/ppc-opcode.h>
#include <asm/barrier.h>
#include <asm/export.h>
#include <asm/asm-compat.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif
#include <asm/feature-fixups.h>

/*
 * System calls.
 */
	.section	".toc","aw"
SYS_CALL_TABLE:
	.tc sys_call_table[TC],sys_call_table

/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

	.section	".text"
	.align 7

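/*
 * On entry from the system call exception (registers as set up by the
 * exception prologue): r0 holds the syscall number, r3-r8 the syscall
 * arguments, r9 the user r13, r11 the SRR0 (NIP) and r12 the SRR1 (MSR)
 * values, and r13 the PACA.
 */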
	.globl system_call_common
system_call_common:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	extrdi.	r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
	bne	.Ltabort_syscall
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
	andi.	r10,r12,MSR_PR
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f
	ld	r1,PACAKSAVE(r13)
1:	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	beq	2f			/* if from kernel mode */
	ACCOUNT_CPU_USER_ENTRY(r13, r10, r11)
2:	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	mfcr	r2
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r11,_XER(r1)
	std	r11,_CTR(r1)
	std	r9,GPR13(r1)
	mflr	r10
	/*
	 * This clears CR0.SO (bit 28), which is the error indication on
	 * return from this system call.
	 */
	rldimi	r2,r11,28,(63-28)
	li	r11,0xc01
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	std	r3,ORIG_GPR3(r1)
	std	r2,_CCR(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
BEGIN_FW_FTR_SECTION
	beq	33f
	/* if from user, see if there are any DTL entries to process */
	ld	r10,PACALPPACAPTR(r13)	/* get ptr to VPA */
	ld	r11,PACA_DTL_RIDX(r13)	/* get log read index */
	addi	r10,r10,LPPACA_DTLIDX
	LDX_BE	r10,0,r10		/* get log write index */
	cmpd	cr1,r11,r10
	beq+	cr1,33f
	bl	accumulate_stolen_time
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
33:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */

	/*
	 * A syscall should always be called with interrupts enabled
	 * so we just unconditionally hard-enable here. When some kind
	 * of irq tracing is used, we additionally check that the
	 * condition is correct.
	 */
#if defined(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && defined(CONFIG_BUG)
	lbz	r10,PACAIRQSOFTMASK(r13)
1:	tdnei	r10,IRQS_ENABLED
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	li	r11,MSR_RI
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

system_call:			/* label this so stack traces look sane */
	/*
	 * We do need to set SOFTE in the stack frame or the return
	 * from interrupt will be painful.
	 */
	li	r10,IRQS_ENABLED
	std	r10,SOFTE(r1)

	CURRENT_THREAD_INFO(r11, r1)
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_DOTRACE
	bne	.Lsyscall_dotrace		/* does not return */
	cmpldi	0,r0,NR_syscalls
	bge-	.Lsyscall_enosys

.Lsyscall:
/*
 * Need to vector to 32 Bit or default sys_call_table here,
 * based on caller's run-mode / personality.
 */
	ld	r11,SYS_CALL_TABLE@toc(2)
	andis.	r10,r10,_TIF_32BIT@h
	beq	15f
	addi	r11,r11,8	/* use 32-bit syscall entries */
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
	slwi	r0,r0,4
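	/*
	 * Note: each sys_call_table entry is 16 bytes -- the native
	 * 64-bit handler followed by its 32-bit compat handler -- which
	 * is why the syscall number is multiplied by 16 (slwi 4) above
	 * and why 32-bit tasks add 8 to r11 to select the compat half.
	 */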

	barrier_nospec_asm
	/*
	 * Prevent the load of the handler below (based on the user-passed
	 * system call number) being speculatively executed until the test
	 * against NR_syscalls and branch to .Lsyscall_enosys above has
	 * committed.
	 */

	ldx	r12,r11,r0	/* Fetch system call handler [ptr] */
	mtctr	r12
	bctrl			/* Call handler */

.Lsyscall_exit:
	std	r3,RESULT(r1)

#ifdef CONFIG_DEBUG_RSEQ
	/* Check whether the syscall is issued inside a restartable sequence */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	rseq_syscall
	ld	r3,RESULT(r1)
#endif

	CURRENT_THREAD_INFO(r12, r1)

	ld	r8,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
	andi.	r10,r8,MSR_RI
	beq-	.Lunrecov_restore
#endif

/*
 * This is a few instructions into the actual syscall exit path (which actually
 * starts at .Lsyscall_exit) to cater to kprobe blacklisting and to reduce the
 * number of visible symbols for profiling purposes.
 *
 * We can probe from system_call until this point as MSR_RI is set. But once it
 * is cleared below, we won't be able to take a trap.
 *
 * This is blacklisted from kprobes further below with _ASM_NOKPROBE_SYMBOL().
 */
system_call_exit:
	/*
	 * Disable interrupts so current_thread_info()->flags can't change,
	 * and so that we don't get interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	/*
	 * For performance reasons we clear RI the same time that we
	 * clear EE. We only need to clear RI just before we restore r13
	 * below, but batching it with EE saves us one expensive mtmsrd call.
	 * We have to be careful to restore RI if we branch anywhere from
	 * here (eg syscall_exit_work).
	 */
	li	r11,0
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

	ld	r9,TI_FLAGS(r12)
	li	r11,-MAX_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	.Lsyscall_exit_work

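	/*
	 * Only call restore_math() if the saved MSR is missing FP (or,
	 * with ALTIVEC, VEC); if both facilities are already enabled
	 * there is no math state to restore and the call is skipped.
	 */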
	andi.	r0,r8,MSR_FP
	beq	2f
#ifdef CONFIG_ALTIVEC
	andis.	r0,r8,MSR_VEC@h
	bne	3f
#endif
2:	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_BOOK3S
	li	r10,MSR_RI
	mtmsrd	r10,1		/* Restore RI */
#endif
	bl	restore_math
#ifdef CONFIG_PPC_BOOK3S
	li	r11,0
	mtmsrd	r11,1
#endif
	ld	r8,_MSR(r1)
	ld	r3,RESULT(r1)
	li	r11,-MAX_ERRNO

3:	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	.Lsyscall_error
.Lsyscall_error_cont:
	ld	r7,_NIP(r1)
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)

	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r13, r11, r12)

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	RFI_TO_USER
	b	.	/* prevent speculative execution */
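	/*
	 * Note: RFI_TO_USER and RFI_TO_KERNEL expand to rfid plus, on
	 * CPUs that need it, the L1-D flush fallback patched in by the
	 * rfi-flush fixups to mitigate Meltdown-style attacks.
	 */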

	/* exit to kernel */
1:	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */

308 | ||
cf7d6fb0 | 309 | .Lsyscall_error: |
9994a338 | 310 | oris r5,r5,0x1000 /* Set SO bit in CR */ |
401d1f02 | 311 | neg r3,r3 |
9994a338 | 312 | std r5,_CCR(r1) |
d14299de | 313 | b .Lsyscall_error_cont |
/* Traced system call support */
.Lsyscall_dotrace:
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter

	/*
	 * We use the return value of do_syscall_trace_enter() as the syscall
	 * number. If the syscall was rejected for any reason do_syscall_trace_enter()
	 * returns an invalid syscall number and the test below against
	 * NR_syscalls will fail.
	 */
	mr	r0,r3

	/* Restore argument registers just clobbered and/or possibly changed. */
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)

	/* Repopulate r9 and r10 for the syscall path */
	addi	r9,r1,STACK_FRAME_OVERHEAD
	CURRENT_THREAD_INFO(r10, r1)
	ld	r10,TI_FLAGS(r10)

	cmpldi	r0,NR_syscalls
	blt+	.Lsyscall

	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
	b	.Lsyscall_exit


.Lsyscall_enosys:
	li	r3,-ENOSYS
	b	.Lsyscall_exit

.Lsyscall_exit_work:
#ifdef CONFIG_PPC_BOOK3S
	li	r10,MSR_RI
	mtmsrd	r10,1		/* Restore RI */
#endif
	/*
	 * If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	 * If TIF_NOERROR is set, just save r3 as it is.
	 */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 is -MAX_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set. */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS

4:	/* Anything else left to do? */
BEGIN_FTR_SECTION
	lis	r3,DEFAULT_PPR@highest	/* Set default PPR */
	sldi	r3,r3,32	/* bits 11-13 are used for ppr */
	std	r3,_PPR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
	beq	ret_from_except_lite

	/* Re-enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	li	r10,MSR_RI
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
.Ltabort_syscall:
	/* Firstly we need to enable TM in the kernel */
	mfmsr	r10
	li	r9, 1
	rldimi	r10, r9, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r10, 0

	/* tabort, this dooms the transaction, nothing else */
	li	r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
	TABORT(R9)

	/*
	 * Return directly to userspace. We have corrupted user register state,
	 * but userspace will never see that register state. Execution will
	 * resume after the tbegin of the aborted transaction with the
	 * checkpointed register state.
	 */
	li	r9, MSR_RI
	andc	r10, r10, r9
	mtmsrd	r10, 1
	mtspr	SPRN_SRR0, r11
	mtspr	SPRN_SRR1, r12
	RFI_TO_USER
	b	.	/* prevent speculative execution */
#endif
_ASM_NOKPROBE_SYMBOL(system_call_common);
_ASM_NOKPROBE_SYMBOL(system_call_exit);

/* Save non-volatile GPRs, if not already saved. */
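/*
 * The low bit of _TRAP is used as a flag: while it is set the
 * non-volatile GPRs have not been saved in the frame. Clearing it
 * below records that they are now valid, so ret_from_except knows
 * whether REST_NVGPRS is safe.
 */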
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr
_ASM_NOKPROBE_SYMBOL(save_nvgprs);

/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace. Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code. Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	save_nvgprs
	bl	sys_fork
	b	.Lsyscall_exit

_GLOBAL(ppc_vfork)
	bl	save_nvgprs
	bl	sys_vfork
	b	.Lsyscall_exit

_GLOBAL(ppc_clone)
	bl	save_nvgprs
	bl	sys_clone
	b	.Lsyscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	save_nvgprs
	bl	compat_sys_swapcontext
	b	.Lsyscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	save_nvgprs
	bl	sys_swapcontext
	b	.Lsyscall_exit

_GLOBAL(ppc_switch_endian)
	bl	save_nvgprs
	bl	sys_switch_endian
	b	.Lsyscall_exit

_GLOBAL(ret_from_fork)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	.Lsyscall_exit

_GLOBAL(ret_from_kernel_thread)
	bl	schedule_tail
	REST_NVGPRS(r1)
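	/*
	 * r14 holds the thread function and r15 its argument, stashed
	 * there by copy_thread(); the ELFv2 ABI also wants the entry
	 * point in r12 so the callee can set up its TOC pointer.
	 */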
	mtlr	r14
	mr	r3,r15
#ifdef PPC64_ELF_ABI_v2
	mr	r12,r14
#endif
	blrl
	li	r3,0
	b	.Lsyscall_exit

#ifdef CONFIG_PPC_BOOK3S_64

#define FLUSH_COUNT_CACHE	\
1:	nop;			\
	patch_site 1b, patch__call_flush_count_cache


#define BCCTR_FLUSH	.long 0x4c400420

.macro nops number
	.rept \number
	nop
	.endr
.endm

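/*
 * Software flush of the branch predictor's count cache, used as a
 * Spectre v2 style mitigation on Book3S-64. FLUSH_COUNT_CACHE above
 * expands to a patchable nop; on affected CPUs it is patched (via
 * patch__call_flush_count_cache) into a call to flush_count_cache
 * below, so that user-controlled predictor entries are displaced on
 * context switch.
 */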
.balign 32
.global flush_count_cache
flush_count_cache:
	/* Save LR into r9 */
	mflr	r9

	.rept 64
	bl	.+4
	.endr
	b	1f
	nops	6

	.balign 32
	/* Restore LR */
1:	mtlr	r9
	li	r9,0x7fff
	mtctr	r9

	BCCTR_FLUSH

2:	nop
	patch_site 2b patch__flush_count_cache_return

	nops	3

	.rept 278
	.balign 32
	BCCTR_FLUSH
	nops	7
	.endr

	blr
#else
#define FLUSH_COUNT_CACHE
#endif /* CONFIG_PPC_BOOK3S_64 */

/*
 * This routine switches between two different tasks. The process
 * state of one is saved on its kernel stack. Then the state
 * of the other is restored from its kernel stack. The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path. If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	std	r0,_NIP(r1)	/* Return to switch caller */
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

	FLUSH_COUNT_CACHE

	/*
	 * On SMP kernels, care must be taken because a task may be
	 * scheduled off CPUx and on to CPUy. Memory ordering must be
	 * considered.
	 *
	 * Cacheable stores on CPUx will be visible when the task is
	 * scheduled on CPUy by virtue of the core scheduler barriers
	 * (see "Notes on Program-Order guarantees on SMP systems." in
	 * kernel/sched/core.c).
	 *
	 * Uncacheable stores in the case of involuntary preemption must
	 * be taken care of. The smp_mb__before_spin_lock() in __schedule()
	 * is implemented as hwsync on powerpc, which orders MMIO too. So
	 * long as there is an hwsync in the context switch path, it will
	 * be executed on the source CPU after the task has performed
	 * all MMIO ops on that CPU, and on the destination CPU before the
	 * task performs any MMIO ops there.
	 */

	/*
	 * The kernel context switch path must contain a spin_lock,
	 * which contains larx/stcx, which will clear any reservation
	 * of the task being switched.
	 */
#ifdef CONFIG_PPC_BOOK3S
	/*
	 * Cancel all explicit user streams as they will have no use after
	 * context switch and will stop the HW from creating streams itself.
	 */
	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r6)
#endif

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */
#if defined(CONFIG_STACKPROTECTOR)
	ld	r6, TASK_CANARY(r6)
	std	r6, PACA_CANARY(r13)
#endif

	ld	r8,KSP(r4)	/* new stack pointer */
#ifdef CONFIG_PPC_BOOK3S_64
BEGIN_MMU_FTR_SECTION
	b	2f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
BEGIN_FTR_SECTION
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
FTR_SECTION_ELSE
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)

	/*
	 * Update the last bolted SLB. No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
	li	r12,SLBSHADOW_STACKVSID
	STDX_BE	r7,r12,r9			/* Save VSID */
	li	r12,SLBSHADOW_STACKESID
	STDX_BE	r0,r12,r9			/* Save ESID */

	/*
	 * No need to check for MMU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	isync
	slbie	r6
BEGIN_FTR_SECTION
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	slbmte	r7,r0
	isync
2:
#endif /* CONFIG_PPC_BOOK3S_64 */

	CURRENT_THREAD_INFO(r7, r8)	/* base of new stack */
	/*
	 * Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	 * because we don't need to leave the 288-byte ABI gap at the
	 * top of the kernel stack.
	 */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	/*
	 * PMU interrupts in radix may come in here. They will use r1, not
	 * PACAKSAVE, so this stack switch will not cause a problem. They
	 * will store to the process stack, which may then be migrated to
	 * another CPU. However the rq lock release on this CPU paired with
	 * the rq lock acquire on the new CPU before the stack becomes
	 * active on the new CPU, will order those stores.
	 */
	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	li	r10,MSR_RI
	mtmsrd	r10,1		/* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */

	CURRENT_THREAD_INFO(r9, r1)
	ld	r3,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3E
	ld	r10,PACACURRENT(r13)
#endif /* CONFIG_PPC_BOOK3E */
	ld	r4,TI_FLAGS(r9)
	andi.	r3,r3,MSR_PR
	beq	resume_kernel
#ifdef CONFIG_PPC_BOOK3E
	lwz	r3,(THREAD+THREAD_DBCR0)(r10)
#endif /* CONFIG_PPC_BOOK3E */

	/* Check current_thread_info()->flags */
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	1f
#ifdef CONFIG_PPC_BOOK3E
	/*
	 * Check to see if the dbcr0 register is set up to debug.
	 * Use the internal debug mode bit to do this.
	 */
	andis.	r0,r3,DBCR0_IDM@h
	beq	restore
	mfmsr	r0
	rlwinm	r0,r0,0,~MSR_DE	/* Clear MSR.DE */
	mtmsr	r0
	mtspr	SPRN_DBCR0,r3
	li	r10, -1
	mtspr	SPRN_DBSR,r10
	b	restore
#else
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	restore_math
	b	restore
#endif
1:	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	2f
	bl	restore_interrupts
	SCHEDULE_USER
	b	ret_from_except_lite
2:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	andi.	r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
	bne	3f		/* only restore TM if nothing else to do */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	restore_tm_state
	b	restore
3:
#endif
	bl	save_nvgprs
	/*
	 * Use a non volatile GPR to save and restore our thread_info flags
	 * across the call to restore_interrupts.
	 */
	mr	r30,r4
	bl	restore_interrupts
	mr	r4,r30
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_notify_resume
	b	ret_from_except

resume_kernel:
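	/*
	 * _TIF_EMULATE_STACK_STORE is set by emulate_step() when it
	 * single-steps a kernel 'stdu r1,-N(r1)' out of line (e.g. for
	 * kprobes): the store cannot be done at the probed address
	 * because the exception frame now occupies that stack memory.
	 * The code below replays it by carving a trampoline frame below
	 * the updated r1, copying the exception frame into it, and
	 * completing the pending store of the back-chain pointer.
	 */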
	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
	andis.	r8,r4,_TIF_EMULATE_STACK_STORE@h
	beq+	1f

	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	ld	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src: current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/8	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */
	mtctr	r5
2:	ldx	r0,r6,r4
	stdx	r0,r6,r3
	addi	r6,r6,8
	bdnz	2b

	/* Do real store operation to complete stdu */
	ld	r5,GPR1(r1)
	std	r8,0(r5)

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r9,TI_FLAGS
0:	ldarx	r4,0,r5
	andc	r4,r4,r11
	stdcx.	r4,0,r5
	bne-	0b
1:

#ifdef CONFIG_PREEMPT
	/* Check if we need to preempt */
	andi.	r0,r4,_TIF_NEED_RESCHED
	beq+	restore
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr0,r8,0
	bne	restore
	ld	r0,SOFTE(r1)
	andi.	r0,r0,IRQS_DISABLED
	bne	restore

	/*
	 * Here we are preempting the current task. We want to make
	 * sure we are soft-disabled first and reconcile irq state.
	 */
	RECONCILE_IRQ_STATE(r3,r4)
1:	bl	preempt_schedule_irq

	/* Re-test the flags and loop again if needed */
	CURRENT_THREAD_INFO(r9, r1)
	ld	r4,TI_FLAGS(r9)
	andi.	r0,r4,_TIF_NEED_RESCHED
	bne	1b

	/*
	 * arch_local_irq_restore() from preempt_schedule_irq above may
	 * hard-enable interrupts, but we really should have them disabled
	 * when we return from the interrupt so that we don't get
	 * interrupted after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	li	r10,MSR_RI
	mtmsrd	r10,1		/* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
#endif /* CONFIG_PREEMPT */

	.globl	fast_exc_return_irq
fast_exc_return_irq:
restore:
	/*
	 * This is the main kernel exit path. First we check if we
	 * are about to re-enable interrupts.
	 */
	ld	r5,SOFTE(r1)
	lbz	r6,PACAIRQSOFTMASK(r13)
	andi.	r5,r5,IRQS_DISABLED
	bne	.Lrestore_irq_off

	/* We are enabling, were we already enabled ? Yes, just return */
	andi.	r6,r6,IRQS_DISABLED
	beq	cr0,.Ldo_restore

	/*
	 * We are about to soft-enable interrupts (we are hard disabled
	 * at this point). We check if there's anything that needs to
	 * be replayed first.
	 */
	lbz	r0,PACAIRQHAPPENED(r13)
	cmpwi	cr0,r0,0
	bne-	.Lrestore_check_irq_replay

	/*
	 * Get here when nothing happened while soft-disabled, just
	 * soft-enable and move-on. We will hard-enable as a side
	 * effect of rfi.
	 */
.Lrestore_no_replay:
	TRACE_ENABLE_INTS
	li	r0,IRQS_ENABLED
	stb	r0,PACAIRQSOFTMASK(r13);

	/*
	 * Final return path. BookE is handled in a different file.
	 */
.Ldo_restore:
#ifdef CONFIG_PPC_BOOK3E
	b	exception_return_book3e
#else
	/*
	 * Clear the reservation. If we know the CPU tracks the address of
	 * the reservation then we can potentially save some cycles and use
	 * a larx. On POWER6 and POWER7 this is significantly faster.
	 */
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r4,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	/*
	 * Some code path such as load_up_fpu or altivec return directly
	 * here. They run entirely hard disabled and do not alter the
	 * interrupt state. They also don't use lwarx/stwcx. and thus
	 * are known not to leave dangling reservations.
	 */
	.globl	fast_exception_return
fast_exception_return:
	ld	r3,_MSR(r1)
	ld	r4,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r4
	mtlr	r0
	ld	r4,_XER(r1)
	mtspr	SPRN_XER,r4

	REST_8GPRS(5, r1)

	andi.	r0,r3,MSR_RI
	beq-	.Lunrecov_restore

	/*
	 * Clear RI before restoring r13. If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	li	r4,0
	mtmsrd	r4,1

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* TM debug */
	std	r3, PACATMSCRATCH(r13) /* Stash returned-to MSR */
#endif
	/*
	 * r13 is our per cpu area, only restore it if we are returning to
	 * userspace; the value stored in the stack frame may belong to
	 * another CPU.
	 */
	andi.	r0,r3,MSR_PR
	beq	1f
BEGIN_FTR_SECTION
	/* Restore PPR */
	ld	r2,_PPR(r1)
	mtspr	SPRN_PPR,r2
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
	REST_GPR(13, r1)

	mtspr	SPRN_SRR1,r3

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)
	RFI_TO_USER
	b	.	/* prevent speculative execution */

1:	mtspr	SPRN_SRR1,r3

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */

#endif /* CONFIG_PPC_BOOK3E */

	/*
	 * We are returning to a context with interrupts soft disabled.
	 *
	 * However, we may also be about to hard-enable, so we need to
	 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
	 * or that bit can get out of sync and bad things will happen.
	 */
.Lrestore_irq_off:
	ld	r3,_MSR(r1)
	lbz	r7,PACAIRQHAPPENED(r13)
	andi.	r0,r3,MSR_EE
	beq	1f
	rlwinm	r7,r7,0,~PACA_IRQ_HARD_DIS
	stb	r7,PACAIRQHAPPENED(r13)
1:
#if defined(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && defined(CONFIG_BUG)
	/* Interrupts should not have been soft-enabled. */
	lbz	r7,PACAIRQSOFTMASK(r13)
1:	tdeqi	r7,IRQS_ENABLED
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif
	b	.Ldo_restore

	/*
	 * Something did happen, check if a re-emit is needed
	 * (this also clears paca->irq_happened).
	 */
.Lrestore_check_irq_replay:
	/*
	 * XXX: We could implement a fast path here where we check
	 * for irq_happened being just 0x01, in which case we can
	 * clear it and return. That means that we would potentially
	 * miss a decrementer having wrapped all the way around.
	 *
	 * Still, this might be useful for things like hash_page.
	 */
	bl	__check_irq_replay
	cmpwi	cr0,r3,0
	beq	.Lrestore_no_replay

	/*
	 * We need to re-emit an interrupt. We do so by re-using our
	 * existing exception frame. We first change the trap value,
	 * but we need to ensure we preserve the low nibble of it.
	 */
	ld	r4,_TRAP(r1)
	clrldi	r4,r4,60
	or	r4,r4,r3
	std	r4,_TRAP(r1)

	/*
	 * PACA_IRQ_HARD_DIS won't always be set here, so set it now
	 * to reconcile the IRQ state. Tracing is already accounted for.
	 */
	lbz	r4,PACAIRQHAPPENED(r13)
	ori	r4,r4,PACA_IRQ_HARD_DIS
	stb	r4,PACAIRQHAPPENED(r13)

	/*
	 * Then find the right handler and call it. Interrupts are
	 * still soft-disabled and we keep them that way.
	 */
	cmpwi	cr0,r3,0x500
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	do_IRQ
	b	ret_from_except
1:	cmpwi	cr0,r3,0xf00
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	performance_monitor_exception
	b	ret_from_except
1:	cmpwi	cr0,r3,0xe60
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	handle_hmi_exception
	b	ret_from_except
1:	cmpwi	cr0,r3,0x900
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	timer_interrupt
	b	ret_from_except
#ifdef CONFIG_PPC_DOORBELL
1:
#ifdef CONFIG_PPC_BOOK3E
	cmpwi	cr0,r3,0x280
#else
	cmpwi	cr0,r3,0xa00
#endif /* CONFIG_PPC_BOOK3E */
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	doorbell_exception
#endif /* CONFIG_PPC_DOORBELL */
1:	b	ret_from_except /* What else to do here ? */

.Lunrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	.Lunrecov_restore

_ASM_NOKPROBE_SYMBOL(ret_from_except);
_ASM_NOKPROBE_SYMBOL(ret_from_except_lite);
_ASM_NOKPROBE_SYMBOL(resume_kernel);
_ASM_NOKPROBE_SYMBOL(fast_exc_return_irq);
_ASM_NOKPROBE_SYMBOL(restore);
_ASM_NOKPROBE_SYMBOL(fast_exception_return);


#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space. */

	/*
	 * Because RTAS is running in 32b mode, it clobbers the high order
	 * half of all registers that it saves. We therefore save those
	 * registers RTAS might touch to the stack. (r0, r3-r13 are caller
	 * saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)

	/*
	 * Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG
	/*
	 * There is no way it is acceptable to get here with interrupts
	 * enabled, check it with the asm equivalent of WARN_ON.
	 */
	lbz	r0,PACAIRQSOFTMASK(r13)
1:	tdeqi	r0,IRQS_ENABLED
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

	/* Hard-disable interrupts */
	mfmsr	r6
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1

	/*
	 * Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4

	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
	andc	r6,r0,r9
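	/*
	 * At this point r0 holds the current MSR with EE/SE/BE/RI cleared
	 * (used below to quiesce the CPU before the rfid), and r6 is the
	 * MSR RTAS will run with: additionally 32-bit (SF clear), real
	 * mode (IR/DR clear), big-endian (LE clear) and FP disabled.
	 */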

__enter_rtas:
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */

rtas_return_loc:
	FIXUP_ENDIAN

	/*
	 * Clear RI and set SF before anything.
	 */
	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sldi	r0,r0,(MSR_SF_LG - MSR_RI_LG)
	or	r6,r6,r0
	sync
	mtmsrd	r6

	/* relocation is off at this point */
	GET_PACA(r4)
	clrldi	r4,r4,2			/* convert to realmode address */

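	/*
	 * bcl 20,31,$+4 is the special form of branch-and-link that the
	 * ISA defines as not being a subroutine call (it doesn't unbalance
	 * the link stack predictor); it is used here purely to get the
	 * current address into LR so we can do PC-relative loads.
	 */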
	bcl	20,31,$+4
0:	mflr	r3
	ld	r3,(1f-0b)(r3)		/* get &rtas_restore_regs */

	ld	r1,PACAR1(r4)		/* Restore our SP */
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */
_ASM_NOKPROBE_SYMBOL(__enter_rtas)
_ASM_NOKPROBE_SYMBOL(rtas_return_loc)

	.align	3
1:	.8byte	rtas_restore_regs

rtas_restore_regs:
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	GET_PACA(r13)

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8

	addi	r1,r1,SWITCH_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space */

	/*
	 * Because PROM is running in 32b mode, it clobbers the high order
	 * half of all registers that it saves. We therefore save those
	 * registers PROM might touch to the stack. (r0, r3-r13 are caller
	 * saved)
	 */
	SAVE_GPR(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r10
	mfmsr	r11
	std	r10,_CCR(r1)
	std	r11,_MSR(r1)

	/* Put PROM address in SRR0 */
	mtsrr0	r4

	/* Setup our trampoline return addr in LR */
	bcl	20,31,$+4
0:	mflr	r4
	addi	r4,r4,(1f - 0b)
	mtlr	r4

	/* Prepare a 32-bit mode big endian MSR */
#ifdef CONFIG_PPC_BOOK3E
	rlwinm	r11,r11,0,1,31
	mtsrr1	r11
	rfi
#else /* CONFIG_PPC_BOOK3E */
	LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
	andc	r11,r11,r12
	mtsrr1	r11
	RFI_TO_KERNEL
#endif /* CONFIG_PPC_BOOK3E */

1:	/* Return from OF */
	FIXUP_ENDIAN
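	/*
	 * FIXUP_ENDIAN detects whether we came back in the wrong
	 * endianness (OF may run big-endian while the kernel runs
	 * little-endian) and switches MSR[LE] back if so.
	 */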

	/*
	 * Just make sure that r1 top 32 bits didn't get
	 * corrupted by OF.
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	MTMSRD(r0)
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4

	addi	r1,r1,SWITCH_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr