/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *  Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *  Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/code-patching-asm.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/hw_irq.h>
#include <asm/context_tracking.h>
#include <asm/tm.h>
#include <asm/ppc-opcode.h>
#include <asm/barrier.h>
#include <asm/export.h>
#include <asm/asm-compat.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif
#include <asm/feature-fixups.h>
#include <asm/kup.h>

/*
 * System calls.
 */
	.section	".toc","aw"
SYS_CALL_TABLE:
	.tc sys_call_table[TC],sys_call_table

COMPAT_SYS_CALL_TABLE:
	.tc compat_sys_call_table[TC],compat_sys_call_table

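/*
 * Each .tc directive above creates a 64-bit TOC entry holding the address
 * of the named table, so the dispatch code further down can reach either
 * table with a single TOC-relative load via r2.
 */
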
/* This value is used to mark exception frames on the stack. */
exception_marker:
	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER

	.section	".text"
	.align 7

	.globl system_call_common
system_call_common:
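	/*
	 * Register state on entry, as set up by the system call exception
	 * prologue (a summary; the prologue lives with the 0xc00 vector in
	 * the exception entry code): r0 holds the syscall number, r3-r8 the
	 * arguments, r9 the user r13, r11 SRR0 (the NIP to return to),
	 * r12 SRR1 (the user MSR), and r13 the PACA.
	 */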
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	extrdi.	r10, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
	bne	.Ltabort_syscall
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
	andi.	r10,r12,MSR_PR
	mr	r10,r1
	addi	r1,r1,-INT_FRAME_SIZE
	beq-	1f
	ld	r1,PACAKSAVE(r13)
1:	std	r10,0(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	std	r0,GPR0(r1)
	std	r10,GPR1(r1)
	beq	2f			/* if from kernel mode */
#ifdef CONFIG_PPC_FSL_BOOK3E
START_BTB_FLUSH_SECTION
	BTB_FLUSH(r10)
END_BTB_FLUSH_SECTION
#endif
	ACCOUNT_CPU_USER_ENTRY(r13, r10, r11)
2:	std	r2,GPR2(r1)
	std	r3,GPR3(r1)
	mfcr	r2
	std	r4,GPR4(r1)
	std	r5,GPR5(r1)
	std	r6,GPR6(r1)
	std	r7,GPR7(r1)
	std	r8,GPR8(r1)
	li	r11,0
	std	r11,GPR9(r1)
	std	r11,GPR10(r1)
	std	r11,GPR11(r1)
	std	r11,GPR12(r1)
	std	r11,_XER(r1)
	std	r11,_CTR(r1)
	std	r9,GPR13(r1)
	mflr	r10
	/*
	 * This clears CR0.SO (bit 28), which is the error indication on
	 * return from this system call.
	 */
	rldimi	r2,r11,28,(63-28)
	li	r11,0xc01
	std	r10,_LINK(r1)
	std	r11,_TRAP(r1)
	std	r3,ORIG_GPR3(r1)
	std	r2,_CCR(r1)
	ld	r2,PACATOC(r13)
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r9)		/* "regshere" marker */

	kuap_check_amr r10, r11

#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
BEGIN_FW_FTR_SECTION
	beq	33f
	/* if from user, see if there are any DTL entries to process */
	ld	r10,PACALPPACAPTR(r13)	/* get ptr to VPA */
	ld	r11,PACA_DTL_RIDX(r13)	/* get log read index */
	addi	r10,r10,LPPACA_DTLIDX
	LDX_BE	r10,0,r10		/* get log write index */
	cmpd	cr1,r11,r10
	beq+	cr1,33f
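	/*
	 * accumulate_stolen_time() is a C call and clobbers the volatile
	 * GPRs, so the syscall number (r0) and arguments (r3-r8) are
	 * restored from the frame below before falling through.
	 */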
	bl	accumulate_stolen_time
	REST_GPR(0,r1)
	REST_4GPRS(3,r1)
	REST_2GPRS(7,r1)
	addi	r9,r1,STACK_FRAME_OVERHEAD
33:
END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */

	/*
	 * A syscall should always be called with interrupts enabled,
	 * so we just unconditionally hard-enable here. When some kind
	 * of irq tracing is used, we additionally check that this
	 * condition actually holds.
	 */
#if defined(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && defined(CONFIG_BUG)
	lbz	r10,PACAIRQSOFTMASK(r13)
1:	tdnei	r10,IRQS_ENABLED
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	li	r11,MSR_RI
	ori	r11,r11,MSR_EE
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

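	/*
	 * A note on the MSR writes above: Book3E has no MSR_RI and uses
	 * wrteei, which touches only MSR[EE]. On Book3S, mtmsrd rx,1 is the
	 * L=1 form that updates only the EE and RI bits, which is cheaper
	 * than rewriting the whole MSR.
	 */
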
system_call:			/* label this so stack traces look sane */
	/*
	 * We do need to set SOFTE in the stack frame, or the return
	 * from interrupt will be painful.
	 */
	li	r10,IRQS_ENABLED
	std	r10,SOFTE(r1)

	ld	r11, PACA_THREAD_INFO(r13)
	ld	r10,TI_FLAGS(r11)
	andi.	r11,r10,_TIF_SYSCALL_DOTRACE
	bne	.Lsyscall_dotrace		/* does not return */
	cmpldi	0,r0,NR_syscalls
	bge-	.Lsyscall_enosys

.Lsyscall:
/*
 * Need to vector to the 32-bit or default sys_call_table here,
 * based on the caller's run-mode / personality.
 */
	ld	r11,SYS_CALL_TABLE@toc(2)
	andis.	r10,r10,_TIF_32BIT@h
	beq	15f
	ld	r11,COMPAT_SYS_CALL_TABLE@toc(2)
	clrldi	r3,r3,32
	clrldi	r4,r4,32
	clrldi	r5,r5,32
	clrldi	r6,r6,32
	clrldi	r7,r7,32
	clrldi	r8,r8,32
15:
	slwi	r0,r0,3

	barrier_nospec_asm
	/*
	 * Prevent the load of the handler below (based on the user-passed
	 * system call number) being speculatively executed until the test
	 * against NR_syscalls and the branch to .Lsyscall_enosys above have
	 * committed.
	 */

	ldx	r12,r11,r0	/* Fetch system call handler [ptr] */
	mtctr	r12
	bctrl			/* Call handler */
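	/*
	 * In C terms the dispatch above is roughly this sketch (not the
	 * literal implementation):
	 *
	 *	long (*handler)(long, long, long, long, long, long);
	 *	handler = table[nr];	// nr was scaled by the slwi above
	 *	ret = handler(r3, r4, r5, r6, r7, r8);
	 */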

.Lsyscall_exit:
	std	r3,RESULT(r1)

#ifdef CONFIG_DEBUG_RSEQ
	/* Check whether the syscall is issued inside a restartable sequence */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	rseq_syscall
	ld	r3,RESULT(r1)
#endif
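	/*
	 * rseq_syscall() is a CONFIG_DEBUG_RSEQ-only sanity check that
	 * terminates the task (SIGSEGV) if the system call was issued from
	 * inside a registered rseq critical section; the bl clobbers r3,
	 * hence the reload of RESULT.
	 */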

	ld	r12, PACA_THREAD_INFO(r13)

	ld	r8,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3S
	/* No MSR:RI on BookE */
	andi.	r10,r8,MSR_RI
	beq-	.Lunrecov_restore
#endif

/*
 * This is a few instructions into the actual syscall exit path (which actually
 * starts at .Lsyscall_exit) to cater to kprobe blacklisting and to reduce the
 * number of visible symbols for profiling purposes.
 *
 * We can probe from system_call until this point as MSR_RI is set. But once it
 * is cleared below, we won't be able to take a trap.
 *
 * This is blacklisted from kprobes further below with _ASM_NOKPROBE_SYMBOL().
 */
system_call_exit:
	/*
	 * Disable interrupts so current_thread_info()->flags can't change,
	 * and so that we don't get interrupted after loading SRR0/1.
	 *
	 * Leave MSR_RI enabled for now, because with THREAD_INFO_IN_TASK we
	 * could fault on the load of the TI_FLAGS below.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	li	r11,MSR_RI
	mtmsrd	r11,1
#endif /* CONFIG_PPC_BOOK3E */

	ld	r9,TI_FLAGS(r12)
	li	r11,-MAX_ERRNO
	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
	bne-	.Lsyscall_exit_work

	andi.	r0,r8,MSR_FP
	beq	2f
#ifdef CONFIG_ALTIVEC
	andis.	r0,r8,MSR_VEC@h
	bne	3f
#endif
2:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	restore_math
	ld	r8,_MSR(r1)
	ld	r3,RESULT(r1)
	li	r11,-MAX_ERRNO

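	/*
	 * The unsigned compare below treats return values in the range
	 * [-MAX_ERRNO, -1] (i.e. 0xfffffffffffff001 and up) as errors:
	 * .Lsyscall_error then sets CR0.SO and negates r3 into a positive
	 * errno for userspace.
	 */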
3:	cmpld	r3,r11
	ld	r5,_CCR(r1)
	bge-	.Lsyscall_error
.Lsyscall_error_cont:
	ld	r7,_NIP(r1)
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1			/* to clear the reservation */
END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
	andi.	r6,r8,MSR_PR
	ld	r4,_LINK(r1)

	kuap_check_amr r10, r11

#ifdef CONFIG_PPC_BOOK3S
	/*
	 * Clear MSR_RI; MSR_EE is already disabled and remains so. We could
	 * do this later, but testing shows that doing it here causes less
	 * slowdown than doing it closer to the rfid.
	 */
	li	r11,0
	mtmsrd	r11,1
#endif

	beq-	1f
	ACCOUNT_CPU_USER_EXIT(r13, r11, r12)

BEGIN_FTR_SECTION
	HMT_MEDIUM_LOW
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	std	r8, PACATMSCRATCH(r13)
#endif

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	ld	r13,GPR13(r1)	/* only restore r13 if returning to usermode */
	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	RFI_TO_USER
	b	.	/* prevent speculative execution */

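	/*
	 * RFI_TO_USER and RFI_TO_KERNEL are patchable macros (see
	 * asm/exception-64s.h): on CPUs that need the RFI-flush mitigation
	 * they expand to an L1-D cache flush sequence before the rfid,
	 * otherwise to a plain rfid.
	 */
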
1:	/* exit to kernel */
	kuap_restore_amr r2

	ld	r2,GPR2(r1)
	ld	r1,GPR1(r1)
	mtlr	r4
	mtcr	r5
	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r8
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */

.Lsyscall_error:
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	neg	r3,r3
	std	r5,_CCR(r1)
	b	.Lsyscall_error_cont

/* Traced system call support */
.Lsyscall_dotrace:
	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_enter

	/*
	 * We use the return value of do_syscall_trace_enter() as the syscall
	 * number. If the syscall was rejected for any reason,
	 * do_syscall_trace_enter() returns an invalid syscall number and the
	 * test below against NR_syscalls will fail.
	 */
	mr	r0,r3

	/* Restore argument registers just clobbered and/or possibly changed. */
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r5,GPR5(r1)
	ld	r6,GPR6(r1)
	ld	r7,GPR7(r1)
	ld	r8,GPR8(r1)

	/* Repopulate r9 and r10 for the syscall path */
	addi	r9,r1,STACK_FRAME_OVERHEAD
	ld	r10, PACA_THREAD_INFO(r13)
	ld	r10,TI_FLAGS(r10)

	cmpldi	r0,NR_syscalls
	blt+	.Lsyscall

	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
	b	.Lsyscall_exit


.Lsyscall_enosys:
	li	r3,-ENOSYS
	b	.Lsyscall_exit

.Lsyscall_exit_work:
	/*
	 * If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
	 * If TIF_NOERROR is set, just save r3 as it is.
	 */

	andi.	r0,r9,_TIF_RESTOREALL
	beq+	0f
	REST_NVGPRS(r1)
	b	2f
0:	cmpld	r3,r11		/* r11 is -MAX_ERRNO */
	blt+	1f
	andi.	r0,r9,_TIF_NOERROR
	bne-	1f
	ld	r5,_CCR(r1)
	neg	r3,r3
	oris	r5,r5,0x1000	/* Set SO bit in CR */
	std	r5,_CCR(r1)
1:	std	r3,GPR3(r1)
2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
	beq	4f

	/* Clear per-syscall TIF flags if any are set.  */

	li	r11,_TIF_PERSYSCALL_MASK
	addi	r12,r12,TI_FLAGS
3:	ldarx	r10,0,r12
	andc	r10,r10,r11
	stdcx.	r10,0,r12
	bne-	3b
	subi	r12,r12,TI_FLAGS
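	/*
	 * The ldarx/stdcx. loop above is an atomic read-modify-write of
	 * ti->flags: other CPUs may update the flags word concurrently, so
	 * a plain load/andc/store could lose their updates.
	 */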

4:	/* Anything else left to do? */
BEGIN_FTR_SECTION
	lis	r3,DEFAULT_PPR@highest	/* Set default PPR */
	sldi	r3,r3,32	/* bits 11-13 are used for ppr */
	std	r3,_PPR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
	beq	ret_from_except_lite

	/* Re-enable interrupts */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	1
#else
	li	r10,MSR_RI
	ori	r10,r10,MSR_EE
	mtmsrd	r10,1
#endif /* CONFIG_PPC_BOOK3E */

	bl	save_nvgprs
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_syscall_trace_leave
	b	ret_from_except

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
.Ltabort_syscall:
	/* Firstly we need to enable TM in the kernel */
	mfmsr	r10
	li	r9, 1
	rldimi	r10, r9, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r10, 0

	/* tabort, this dooms the transaction, nothing else */
	li	r9, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
	TABORT(R9)

	/*
	 * Return directly to userspace. We have corrupted user register state,
	 * but userspace will never see that register state. Execution will
	 * resume after the tbegin of the aborted transaction with the
	 * checkpointed register state.
	 */
	li	r9, MSR_RI
	andc	r10, r10, r9
	mtmsrd	r10, 1
	mtspr	SPRN_SRR0, r11
	mtspr	SPRN_SRR1, r12
	RFI_TO_USER
	b	.	/* prevent speculative execution */
#endif
_ASM_NOKPROBE_SYMBOL(system_call_common);
_ASM_NOKPROBE_SYMBOL(system_call_exit);

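/*
 * A note on the convention used by save_nvgprs below: the low bit of the
 * frame's _TRAP word means "non-volatile GPRs not yet saved" (the syscall
 * path stores 0xc01, for instance). Once r14-r31 have been written to the
 * frame the bit is cleared, so a second call returns early via beqlr-.
 */
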
/* Save non-volatile GPRs, if not already saved. */
_GLOBAL(save_nvgprs)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	beqlr-
	SAVE_NVGPRS(r1)
	clrrdi	r0,r11,1
	std	r0,_TRAP(r1)
	blr
_ASM_NOKPROBE_SYMBOL(save_nvgprs);


/*
 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 * and thus put the process into the stopped state where we might
 * want to examine its user state with ptrace. Therefore we need
 * to save all the nonvolatile registers (r14 - r31) before calling
 * the C code. Similarly, fork, vfork and clone need the full
 * register state on the stack so that it can be copied to the child.
 */

_GLOBAL(ppc_fork)
	bl	save_nvgprs
	bl	sys_fork
	b	.Lsyscall_exit

_GLOBAL(ppc_vfork)
	bl	save_nvgprs
	bl	sys_vfork
	b	.Lsyscall_exit

_GLOBAL(ppc_clone)
	bl	save_nvgprs
	bl	sys_clone
	b	.Lsyscall_exit

_GLOBAL(ppc32_swapcontext)
	bl	save_nvgprs
	bl	compat_sys_swapcontext
	b	.Lsyscall_exit

_GLOBAL(ppc64_swapcontext)
	bl	save_nvgprs
	bl	sys_swapcontext
	b	.Lsyscall_exit

_GLOBAL(ppc_switch_endian)
	bl	save_nvgprs
	bl	sys_switch_endian
	b	.Lsyscall_exit

_GLOBAL(ret_from_fork)
	bl	schedule_tail
	REST_NVGPRS(r1)
	li	r3,0
	b	.Lsyscall_exit

_GLOBAL(ret_from_kernel_thread)
	bl	schedule_tail
	REST_NVGPRS(r1)
	mtlr	r14
	mr	r3,r15
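	/*
	 * r14 holds the thread function and r15 its argument (placed there
	 * by copy_thread()). Under ELF ABI v2 a callee expects its own entry
	 * address in r12 at the global entry point so it can establish its
	 * TOC pointer, hence the copy below.
	 */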
#ifdef PPC64_ELF_ABI_v2
	mr	r12,r14
#endif
	blrl
	li	r3,0
	b	.Lsyscall_exit

#ifdef CONFIG_PPC_BOOK3S_64

#define FLUSH_COUNT_CACHE	\
1:	nop;			\
	patch_site 1b, patch__call_flush_count_cache


#define BCCTR_FLUSH	.long 0x4c400420

.macro nops number
	.rept \number
	nop
	.endr
.endm

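/*
 * A rough summary of flush_count_cache below (part of the Spectre-v2
 * mitigation for indirect branch prediction): the 64 "bl .+4" calls
 * displace any attacker-controlled link stack entries, and BCCTR_FLUSH
 * (0x4c400420, a special form of bcctr emitted as .long) triggers the
 * count cache flush on CPUs/firmware that implement it.
 */
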
.balign 32
.global flush_count_cache
flush_count_cache:
	/* Save LR into r9 */
	mflr	r9

	.rept 64
	bl	.+4
	.endr
	b	1f
	nops	6

	.balign 32
	/* Restore LR */
1:	mtlr	r9
	li	r9,0x7fff
	mtctr	r9

	BCCTR_FLUSH

2:	nop
	patch_site 2b patch__flush_count_cache_return

	nops	3

	.rept 278
	.balign 32
	BCCTR_FLUSH
	nops	7
	.endr

	blr
#else
#define FLUSH_COUNT_CACHE
#endif /* CONFIG_PPC_BOOK3S_64 */

/*
 * This routine switches between two different tasks. The process
 * state of one is saved on its kernel stack. Then the state
 * of the other is restored from its kernel stack. The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via ret_from_except.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path. If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
	.align	7
_GLOBAL(_switch)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	std	r0,_NIP(r1)	/* Return to switch caller */
	mfcr	r23
	std	r23,_CCR(r1)
	std	r1,KSP(r3)	/* Set old stack pointer */

	kuap_check_amr r9, r10

	FLUSH_COUNT_CACHE

	/*
	 * On SMP kernels, care must be taken because a task may be
	 * scheduled off CPUx and on to CPUy. Memory ordering must be
	 * considered.
	 *
	 * Cacheable stores on CPUx will be visible when the task is
	 * scheduled on CPUy by virtue of the core scheduler barriers
	 * (see "Notes on Program-Order guarantees on SMP systems." in
	 * kernel/sched/core.c).
	 *
	 * Uncacheable stores in the case of involuntary preemption must
	 * be taken care of. The smp_mb__before_spin_lock() in __schedule()
	 * is implemented as hwsync on powerpc, which orders MMIO too. So
	 * long as there is an hwsync in the context switch path, it will
	 * be executed on the source CPU after the task has performed
	 * all MMIO ops on that CPU, and on the destination CPU before the
	 * task performs any MMIO ops there.
	 */

	/*
	 * The kernel context switch path must contain a spin_lock,
	 * which contains larx/stcx, which will clear any reservation
	 * of the task being switched.
	 */
#ifdef CONFIG_PPC_BOOK3S
	/* Cancel all explicit user streams as they will have no use after
	 * context switch, and stop the HW from creating streams itself.
	 */
	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r6)
#endif

	addi	r6,r4,-THREAD	/* Convert THREAD to 'current' */
	std	r6,PACACURRENT(r13)	/* Set new 'current' */
#if defined(CONFIG_STACKPROTECTOR)
	ld	r6, TASK_CANARY(r6)
	std	r6, PACA_CANARY(r13)
#endif

	ld	r8,KSP(r4)	/* new stack pointer */
#ifdef CONFIG_PPC_BOOK3S_64
BEGIN_MMU_FTR_SECTION
	b	2f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
BEGIN_FTR_SECTION
	clrrdi	r6,r8,28	/* get its ESID */
	clrrdi	r9,r1,28	/* get current sp ESID */
FTR_SECTION_ELSE
	clrrdi	r6,r8,40	/* get its 1T ESID */
	clrrdi	r9,r1,40	/* get current sp 1T ESID */
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
	clrldi.	r0,r6,2		/* is new ESID c00000000? */
	cmpd	cr1,r6,r9	/* or is new ESID the same as current ESID? */
	cror	eq,4*cr1+eq,eq
	beq	2f		/* if yes, don't slbie it */

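	/*
	 * The new stack's SLB entry is kept in a bolted (fixed) SLB slot
	 * below, roughly so that kernel stack accesses never take an SLB
	 * miss; faulting on the stack in the wrong window would be
	 * unrecoverable.
	 */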
	/* Bolt in the new stack SLB entry */
	ld	r7,KSP_VSID(r4)	/* Get new stack's VSID */
	oris	r0,r6,(SLB_ESID_V)@h
	ori	r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
	li	r9,MMU_SEGSIZE_1T	/* insert B field */
	oris	r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
	rldimi	r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)

	/* Update the last bolted SLB.  No write barriers are needed
	 * here, provided we only update the current CPU's SLB shadow
	 * buffer.
	 */
	ld	r9,PACA_SLBSHADOWPTR(r13)
	li	r12,0
	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
	li	r12,SLBSHADOW_STACKVSID
	STDX_BE	r7,r12,r9			/* Save VSID */
	li	r12,SLBSHADOW_STACKESID
	STDX_BE	r0,r12,r9			/* Save ESID */

	/* No need to check for MMU_FTR_NO_SLBIE_B here, since when
	 * we have 1TB segments, the only CPUs known to have the errata
	 * only support less than 1TB of system memory and we'll never
	 * actually hit this code path.
	 */

	isync
	slbie	r6
BEGIN_FTR_SECTION
	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	slbmte	r7,r0
	isync
2:
#endif /* CONFIG_PPC_BOOK3S_64 */

	clrrdi	r7, r8, THREAD_SHIFT	/* base of new stack */
	/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
	   because we don't need to leave the 288-byte ABI gap at the
	   top of the kernel stack. */
	addi	r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

	/*
	 * PMU interrupts in radix may come in here. They will use r1, not
	 * PACAKSAVE, so this stack switch will not cause a problem. They
	 * will store to the process stack, which may then be migrated to
	 * another CPU. However the rq lock release on this CPU paired with
	 * the rq lock acquire on the new CPU before the stack becomes
	 * active on the new CPU, will order those stores.
	 */
	mr	r1,r8		/* start using new stack pointer */
	std	r7,PACAKSAVE(r13)

	ld	r6,_CCR(r1)
	mtcrf	0xFF,r6

	/* r3-r13 are destroyed -- Cort */
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)

	/* convert old thread to its task_struct for return value */
	addi	r3,r3,-THREAD
	ld	r7,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r7
	addi	r1,r1,SWITCH_FRAME_SIZE
	blr

	.align	7
_GLOBAL(ret_from_except)
	ld	r11,_TRAP(r1)
	andi.	r0,r11,1
	bne	ret_from_except_lite
	REST_NVGPRS(r1)

_GLOBAL(ret_from_except_lite)
	/*
	 * Disable interrupts so that current_thread_info()->flags
	 * can't change between when we test it and when we return
	 * from the interrupt.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	li	r10,MSR_RI
	mtmsrd	r10,1		/* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */

	ld	r9, PACA_THREAD_INFO(r13)
	ld	r3,_MSR(r1)
#ifdef CONFIG_PPC_BOOK3E
	ld	r10,PACACURRENT(r13)
#endif /* CONFIG_PPC_BOOK3E */
	ld	r4,TI_FLAGS(r9)
	andi.	r3,r3,MSR_PR
	beq	resume_kernel
#ifdef CONFIG_PPC_BOOK3E
	lwz	r3,(THREAD+THREAD_DBCR0)(r10)
#endif /* CONFIG_PPC_BOOK3E */

	/* Check current_thread_info()->flags */
	andi.	r0,r4,_TIF_USER_WORK_MASK
	bne	1f
#ifdef CONFIG_PPC_BOOK3E
	/*
	 * Check to see if the dbcr0 register is set up to debug.
	 * Use the internal debug mode bit to do this.
	 */
	andis.	r0,r3,DBCR0_IDM@h
	beq	restore
	mfmsr	r0
	rlwinm	r0,r0,0,~MSR_DE	/* Clear MSR.DE */
	mtmsr	r0
	mtspr	SPRN_DBCR0,r3
	li	r10, -1
	mtspr	SPRN_DBSR,r10
	b	restore
#else
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	restore_math
	b	restore
#endif
1:	andi.	r0,r4,_TIF_NEED_RESCHED
	beq	2f
	bl	restore_interrupts
	SCHEDULE_USER
	b	ret_from_except_lite
2:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	andi.	r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM
	bne	3f		/* only restore TM if nothing else to do */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	restore_tm_state
	b	restore
3:
#endif
	bl	save_nvgprs
	/*
	 * Use a non-volatile GPR to save and restore our thread_info flags
	 * across the call to restore_interrupts.
	 */
	mr	r30,r4
	bl	restore_interrupts
	mr	r4,r30
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_notify_resume
	b	ret_from_except

resume_kernel:
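	/*
	 * A note on the block below: _TIF_EMULATE_STACK_STORE is set when
	 * emulate_step() has emulated a stdu/stwu that updates r1 (e.g. for
	 * a probed instruction) but could not perform the store, because the
	 * exception frame occupies that stack area; the frame is copied down
	 * and the pending store completed here.
	 */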
	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
	andis.	r8,r4,_TIF_EMULATE_STACK_STORE@h
	beq+	1f

	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */

	ld	r3,GPR1(r1)
	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
	mr	r4,r1			/* src:  current exception frame */
	mr	r1,r3			/* Reroute the trampoline frame to r1 */

	/* Copy from the original to the trampoline. */
	li	r5,INT_FRAME_SIZE/8	/* size: INT_FRAME_SIZE */
	li	r6,0			/* start offset: 0 */
	mtctr	r5
2:	ldx	r0,r6,r4
	stdx	r0,r6,r3
	addi	r6,r6,8
	bdnz	2b

	/* Do real store operation to complete stdu */
	ld	r5,GPR1(r1)
	std	r8,0(r5)

	/* Clear _TIF_EMULATE_STACK_STORE flag */
	lis	r11,_TIF_EMULATE_STACK_STORE@h
	addi	r5,r9,TI_FLAGS
0:	ldarx	r4,0,r5
	andc	r4,r4,r11
	stdcx.	r4,0,r5
	bne-	0b
1:

#ifdef CONFIG_PREEMPT
	/* Check if we need to preempt */
	andi.	r0,r4,_TIF_NEED_RESCHED
	beq+	restore
	/* Check that preempt_count() == 0 and interrupts are enabled */
	lwz	r8,TI_PREEMPT(r9)
	cmpwi	cr0,r8,0
	bne	restore
	ld	r0,SOFTE(r1)
	andi.	r0,r0,IRQS_DISABLED
	bne	restore

	/*
	 * Here we are preempting the current task. We want to make
	 * sure we are soft-disabled first and reconcile irq state.
	 */
	RECONCILE_IRQ_STATE(r3,r4)
	bl	preempt_schedule_irq

	/*
	 * arch_local_irq_restore() from preempt_schedule_irq above may
	 * hard-enable interrupts, but we should disable them again before
	 * returning from the interrupt so that we don't get interrupted
	 * after loading SRR0/1.
	 */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	li	r10,MSR_RI
	mtmsrd	r10,1		/* Update machine state */
#endif /* CONFIG_PPC_BOOK3E */
#endif /* CONFIG_PREEMPT */

	.globl	fast_exc_return_irq
fast_exc_return_irq:
restore:
	/*
	 * This is the main kernel exit path. First we check if we
	 * are about to re-enable interrupts.
	 */
	ld	r5,SOFTE(r1)
	lbz	r6,PACAIRQSOFTMASK(r13)
	andi.	r5,r5,IRQS_DISABLED
	bne	.Lrestore_irq_off

	/* We are enabling, were we already enabled ? Yes, just return */
	andi.	r6,r6,IRQS_DISABLED
	beq	cr0,.Ldo_restore

	/*
	 * We are about to soft-enable interrupts (we are hard disabled
	 * at this point). We check if there's anything that needs to
	 * be replayed first.
	 */
	lbz	r0,PACAIRQHAPPENED(r13)
	cmpwi	cr0,r0,0
	bne-	.Lrestore_check_irq_replay

	/*
	 * Get here when nothing happened while soft-disabled, just
	 * soft-enable and move on. We will hard-enable as a side
	 * effect of rfi.
	 */
.Lrestore_no_replay:
	TRACE_ENABLE_INTS
	li	r0,IRQS_ENABLED
	stb	r0,PACAIRQSOFTMASK(r13);

	/*
	 * Final return path. BookE is handled in a different file.
	 */
.Ldo_restore:
#ifdef CONFIG_PPC_BOOK3E
	b	exception_return_book3e
#else
	/*
	 * Clear the reservation. If we know the CPU tracks the address of
	 * the reservation then we can potentially save some cycles and use
	 * a larx. On POWER6 and POWER7 this is significantly faster.
	 */
BEGIN_FTR_SECTION
	stdcx.	r0,0,r1		/* to clear the reservation */
FTR_SECTION_ELSE
	ldarx	r4,0,r1
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)

	/*
	 * Some code paths such as load_up_fpu or altivec return directly
	 * here. They run entirely hard disabled and do not alter the
	 * interrupt state. They also don't use lwarx/stwcx. and thus
	 * are known not to leave dangling reservations.
	 */
	.globl	fast_exception_return
fast_exception_return:
	ld	r3,_MSR(r1)
	ld	r4,_CTR(r1)
	ld	r0,_LINK(r1)
	mtctr	r4
	mtlr	r0
	ld	r4,_XER(r1)
	mtspr	SPRN_XER,r4

	kuap_check_amr r5, r6

	REST_8GPRS(5, r1)

	andi.	r0,r3,MSR_RI
	beq-	.Lunrecov_restore

	/*
	 * Clear RI before restoring r13. If we are returning to
	 * userspace and we take an exception after restoring r13,
	 * we end up corrupting the userspace r13 value.
	 */
	li	r4,0
	mtmsrd	r4,1

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* TM debug */
	std	r3, PACATMSCRATCH(r13)	/* Stash returned-to MSR */
#endif
	/*
	 * r13 is our per-cpu area; only restore it if we are returning to
	 * userspace, since the value stored in the stack frame may belong
	 * to another CPU.
	 */
	andi.	r0,r3,MSR_PR
	beq	1f
BEGIN_FTR_SECTION
	/* Restore PPR */
	ld	r2,_PPR(r1)
	mtspr	SPRN_PPR,r2
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ACCOUNT_CPU_USER_EXIT(r13, r2, r4)
	REST_GPR(13, r1)

	/*
	 * We don't need to restore AMR on the way back to userspace for KUAP.
	 * The value of AMR only matters while we're in the kernel.
	 */
	mtspr	SPRN_SRR1,r3

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)
	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)
	RFI_TO_USER
	b	.	/* prevent speculative execution */

1:	mtspr	SPRN_SRR1,r3

	ld	r2,_CCR(r1)
	mtcrf	0xFF,r2
	ld	r2,_NIP(r1)
	mtspr	SPRN_SRR0,r2

	/*
	 * Leaving a stale exception_marker on the stack can confuse
	 * the reliable stack unwinder later on. Clear it.
	 */
	li	r2,0
	std	r2,STACK_FRAME_OVERHEAD-16(r1)

	ld	r0,GPR0(r1)
	ld	r2,GPR2(r1)
	ld	r3,GPR3(r1)

	kuap_restore_amr r4

	ld	r4,GPR4(r1)
	ld	r1,GPR1(r1)
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */

#endif /* CONFIG_PPC_BOOK3E */

	/*
	 * We are returning to a context with interrupts soft disabled.
	 *
	 * However, we may also be about to hard enable, so we need to make
	 * sure that in this case we also clear PACA_IRQ_HARD_DIS, or that
	 * bit can get out of sync and bad things will happen.
	 */
.Lrestore_irq_off:
	ld	r3,_MSR(r1)
	lbz	r7,PACAIRQHAPPENED(r13)
	andi.	r0,r3,MSR_EE
	beq	1f
	rlwinm	r7,r7,0,~PACA_IRQ_HARD_DIS
	stb	r7,PACAIRQHAPPENED(r13)
1:
#if defined(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && defined(CONFIG_BUG)
	/* The interrupt should not have soft enabled. */
	lbz	r7,PACAIRQSOFTMASK(r13)
1:	tdeqi	r7,IRQS_ENABLED
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif
	b	.Ldo_restore

	/*
	 * Something did happen, check if a re-emit is needed
	 * (this also clears paca->irq_happened)
	 */
.Lrestore_check_irq_replay:
	/* XXX: We could implement a fast path here where we check
	 * for irq_happened being just 0x01, in which case we can
	 * clear it and return. That means that we would potentially
	 * miss a decrementer having wrapped all the way around.
	 *
	 * Still, this might be useful for things like hash_page
	 */
	bl	__check_irq_replay
	cmpwi	cr0,r3,0
	beq	.Lrestore_no_replay

	/*
	 * We need to re-emit an interrupt. We do so by re-using our
	 * existing exception frame. We first change the trap value,
	 * but we need to ensure we preserve the low nibble of it
	 */
	ld	r4,_TRAP(r1)
	clrldi	r4,r4,60
	or	r4,r4,r3
	std	r4,_TRAP(r1)

	/*
	 * PACA_IRQ_HARD_DIS won't always be set here, so set it now
	 * to reconcile the IRQ state. Tracing is already accounted for.
	 */
	lbz	r4,PACAIRQHAPPENED(r13)
	ori	r4,r4,PACA_IRQ_HARD_DIS
	stb	r4,PACAIRQHAPPENED(r13)

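	/*
	 * r3 now holds the vector number returned by __check_irq_replay()
	 * (0x500 external, 0x900 decrementer, 0xa00/0x280 doorbell, 0xe60
	 * HMI, 0xf00 PMI), dispatched below while staying soft-disabled.
	 */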
	/*
	 * Then find the right handler and call it. Interrupts are
	 * still soft-disabled and we keep them that way.
	 */
	cmpwi	cr0,r3,0x500
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	do_IRQ
	b	ret_from_except
1:	cmpwi	cr0,r3,0xf00
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	performance_monitor_exception
	b	ret_from_except
1:	cmpwi	cr0,r3,0xe60
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	handle_hmi_exception
	b	ret_from_except
1:	cmpwi	cr0,r3,0x900
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	timer_interrupt
	b	ret_from_except
#ifdef CONFIG_PPC_DOORBELL
1:
#ifdef CONFIG_PPC_BOOK3E
	cmpwi	cr0,r3,0x280
#else
	cmpwi	cr0,r3,0xa00
#endif /* CONFIG_PPC_BOOK3E */
	bne	1f
	addi	r3,r1,STACK_FRAME_OVERHEAD;
	bl	doorbell_exception
#endif /* CONFIG_PPC_DOORBELL */
1:	b	ret_from_except /* What else to do here ? */

.Lunrecov_restore:
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	.Lunrecov_restore

_ASM_NOKPROBE_SYMBOL(ret_from_except);
_ASM_NOKPROBE_SYMBOL(ret_from_except_lite);
_ASM_NOKPROBE_SYMBOL(resume_kernel);
_ASM_NOKPROBE_SYMBOL(fast_exc_return_irq);
_ASM_NOKPROBE_SYMBOL(restore);
_ASM_NOKPROBE_SYMBOL(fast_exception_return);


#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
_GLOBAL(enter_rtas)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)	/* Save SP and create stack space. */

	/* Because RTAS is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves. We therefore save those registers
	 * RTAS might touch to the stack. (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)			/* Save the TOC */
	SAVE_GPR(13, r1)		/* Save paca */
	SAVE_8GPRS(14, r1)		/* Save the non-volatiles */
	SAVE_10GPRS(22, r1)		/* ditto */

	mfcr	r4
	std	r4,_CCR(r1)
	mfctr	r5
	std	r5,_CTR(r1)
	mfspr	r6,SPRN_XER
	std	r6,_XER(r1)
	mfdar	r7
	std	r7,_DAR(r1)
	mfdsisr	r8
	std	r8,_DSISR(r1)

	/* Temporary workaround to clear CR until RTAS can be modified to
	 * ignore all bits.
	 */
	li	r0,0
	mtcr	r0

#ifdef CONFIG_BUG
	/* There is no way it is acceptable to get here with interrupts enabled,
	 * check it with the asm equivalent of WARN_ON
	 */
	lbz	r0,PACAIRQSOFTMASK(r13)
1:	tdeqi	r0,IRQS_ENABLED
	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

	/* Hard-disable interrupts */
	mfmsr	r6
	rldicl	r7,r6,48,1
	rotldi	r7,r7,16
	mtmsrd	r7,1

	/* Unfortunately, the stack pointer and the MSR are also clobbered,
	 * so they are saved in the PACA which allows us to restore
	 * our original state after RTAS returns.
	 */
	std	r1,PACAR1(r13)
	std	r6,PACASAVEDMSR(r13)

	/* Setup our real return addr */
	LOAD_REG_ADDR(r4,rtas_return_loc)
	clrldi	r4,r4,2			/* convert to realmode address */
	mtlr	r4

	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
	andc	r0,r6,r0

	li	r9,1
	rldicr	r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
	ori	r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
	andc	r6,r0,r9
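	/*
	 * After the two andc sequences above: r0 is the current MSR with
	 * EE/SE/BE/RI cleared (used below to quiesce before the rfi), and
	 * r6 additionally drops SF/IR/DR/FE0/FE1/FP/LE, giving the 32-bit,
	 * big-endian, real-mode MSR that RTAS itself runs with.
	 */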

__enter_rtas:
	sync				/* disable interrupts so SRR0/1 */
	mtmsrd	r0			/* don't get trashed */

	LOAD_REG_ADDR(r4, rtas)
	ld	r5,RTASENTRY(r4)	/* get the rtas->entry value */
	ld	r4,RTASBASE(r4)		/* get the rtas->base value */

	mtspr	SPRN_SRR0,r5
	mtspr	SPRN_SRR1,r6
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */

rtas_return_loc:
	FIXUP_ENDIAN

	/*
	 * Clear RI and set SF before anything.
	 */
	mfmsr	r6
	li	r0,MSR_RI
	andc	r6,r6,r0
	sldi	r0,r0,(MSR_SF_LG - MSR_RI_LG)
	or	r6,r6,r0
	sync
	mtmsrd	r6

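	/*
	 * The bcl 20,31,$+4 below is the usual PC-relative trick: a branch
	 * to the next instruction that sets LR, so mflr yields the runtime
	 * address of label 0 and the .8byte literal at 1f can be located
	 * with relocation off.
	 */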
	/* relocation is off at this point */
	GET_PACA(r4)
	clrldi	r4,r4,2			/* convert to realmode address */

	bcl	20,31,$+4
0:	mflr	r3
	ld	r3,(1f-0b)(r3)		/* get &rtas_restore_regs */

	ld	r1,PACAR1(r4)		/* Restore our SP */
	ld	r4,PACASAVEDMSR(r4)	/* Restore our MSR */

	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	RFI_TO_KERNEL
	b	.	/* prevent speculative execution */
_ASM_NOKPROBE_SYMBOL(__enter_rtas)
_ASM_NOKPROBE_SYMBOL(rtas_return_loc)

	.align	3
1:	.8byte	rtas_restore_regs

rtas_restore_regs:
	/* relocation is on at this point */
	REST_GPR(2, r1)			/* Restore the TOC */
	REST_GPR(13, r1)		/* Restore paca */
	REST_8GPRS(14, r1)		/* Restore the non-volatiles */
	REST_10GPRS(22, r1)		/* ditto */

	GET_PACA(r13)

	ld	r4,_CCR(r1)
	mtcr	r4
	ld	r5,_CTR(r1)
	mtctr	r5
	ld	r6,_XER(r1)
	mtspr	SPRN_XER,r6
	ld	r7,_DAR(r1)
	mtdar	r7
	ld	r8,_DSISR(r1)
	mtdsisr	r8

	addi	r1,r1,SWITCH_FRAME_SIZE	/* Unstack our frame */
	ld	r0,16(r1)		/* get return address */

	mtlr	r0
	blr				/* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,-SWITCH_FRAME_SIZE(r1)	/* Save SP and create stack space */

	/* Because PROM is running in 32b mode, it clobbers the high order half
	 * of all registers that it saves. We therefore save those registers
	 * PROM might touch to the stack. (r0, r3-r13 are caller saved)
	 */
	SAVE_GPR(2, r1)
	SAVE_GPR(13, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mfcr	r10
	mfmsr	r11
	std	r10,_CCR(r1)
	std	r11,_MSR(r1)

	/* Put PROM address in SRR0 */
	mtsrr0	r4

	/* Setup our trampoline return addr in LR */
	bcl	20,31,$+4
0:	mflr	r4
	addi	r4,r4,(1f - 0b)
	mtlr	r4

	/* Prepare a 32-bit mode big endian MSR */
#ifdef CONFIG_PPC_BOOK3E
	rlwinm	r11,r11,0,1,31
	mtsrr1	r11
	rfi
#else /* CONFIG_PPC_BOOK3E */
	LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_ISF | MSR_LE)
	andc	r11,r11,r12
	mtsrr1	r11
	RFI_TO_KERNEL
#endif /* CONFIG_PPC_BOOK3E */

1:	/* Return from OF */
	FIXUP_ENDIAN

	/* Just make sure that r1's top 32 bits didn't get
	 * corrupted by OF
	 */
	rldicl	r1,r1,0,32

	/* Restore the MSR (back to 64 bits) */
	ld	r0,_MSR(r1)
	MTMSRD(r0)
	isync

	/* Restore other registers */
	REST_GPR(2, r1)
	REST_GPR(13, r1)
	REST_8GPRS(14, r1)
	REST_10GPRS(22, r1)
	ld	r4,_CCR(r1)
	mtcr	r4

	addi	r1,r1,SWITCH_FRAME_SIZE
	ld	r0,16(r1)
	mtlr	r0
	blr