1 #include <asm/asm-offsets.h>
3 #ifdef CONFIG_PPC_BOOK3S
4 #include <asm/exception-64s.h>
6 #include <asm/exception-64e.h>
8 #include <asm/feature-fixups.h>
9 #include <asm/head-64.h>
10 #include <asm/hw_irq.h>
13 #include <asm/ppc_asm.h>
14 #include <asm/ptrace.h>
/*
 * DEBUG_SRR_VALID srr: debug-only consistency check, compiled in when
 * CONFIG_PPC_RFI_SRR_DEBUG is set, that warns (once per site) when the
 * SRR0/SRR1 (or HSRR0/HSRR1) values about to be used for interrupt return
 * no longer match what the kernel believes they should be.
 * NOTE(review): this excerpt elides the compare/branch code between the
 * EMIT_WARN_ENTRY lines and the closing .endm — confirm against the full
 * source before relying on the exact checks performed.
 */
18 .macro DEBUG_SRR_VALID srr
19 #ifdef CONFIG_PPC_RFI_SRR_DEBUG
/* Each EMIT_WARN_ENTRY records a one-shot WARN (BUGFLAG_ONCE) against the
 * preceding local "100:" label; the mfspr/cmpd feeding each one is elided
 * in this excerpt. */
26 EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
30 EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
37 EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
41 EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
46 #ifdef CONFIG_PPC_BOOK3S
/*
 * system_call_vectored name trapnr
 *
 * Entry/exit path for the Book3S "scv" (system call vectored) instruction.
 * Instantiated twice below: once as the common entry (trap 0x3000) and once
 * as the SIGILL variant (trap 0x7ff0).  The macro builds the pt_regs frame,
 * calls system_call_exception()/syscall_exit_prepare() in C, and returns to
 * userspace with rfscv, with a restartable exit sequence registered in the
 * soft-mask and restart tables.
 * NOTE(review): many instructions are elided from this excerpt (register
 * saves, the rfscv itself, the restore path) — comments below describe only
 * what is visible.
 */
47 .macro system_call_vectored name trapnr
48 .globl system_call_vectored_\name
49 system_call_vectored_\name:
50 _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name)
51 SCV_INTERRUPT_TO_KERNEL
63 /* Save syscall parameters in r3-r8 */
65 /* Zero r9-r12, this should only be required when restoring all GPRs */
80 /* Calling convention has r3 = regs, r4 = orig r0 */
81 addi r3,r1,STACK_FRAME_OVERHEAD
83 LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER)
84 std r11,-16(r3) /* "regshere" marker */
88 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
91 * scv enters with MSR[EE]=1 and is immediately considered soft-masked.
92 * The entry vector already sets PACAIRQSOFTMASK to IRQS_ALL_DISABLED,
93 * and interrupts may be masked and pending already.
94 * system_call_exception() will call trace_hardirqs_off() which means
95 * interrupts could already have been blocked before trace_hardirqs_off,
96 * but this is the best we can do.
99 bl system_call_exception
101 .Lsyscall_vectored_\name\()_exit:
102 addi r4,r1,STACK_FRAME_OVERHEAD
104 bl syscall_exit_prepare
105 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
/* Restartable section: if any interrupt other than HARD_DIS became pending
 * while soft-masked, take the restart path rather than returning. */
106 .Lsyscall_vectored_\name\()_rst_start:
107 lbz r11,PACAIRQHAPPENED(r13)
108 andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
109 bne- syscall_vectored_\name\()_restart
111 stb r11,PACAIRQSOFTMASK(r13)
113 stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
120 stdcx. r0,0,r1 /* to clear the reservation */
121 END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
125 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
128 bne .Lsyscall_vectored_\name\()_restore_regs
130 /* rfscv returns with LR->NIA and CTR->MSR */
134 /* Could zero these as per ABI, but we may consider a stricter ABI
135 * which preserves these if libc implementations can benefit, so
136 * restore them for now until further measurement is done. */
139 /* Zero volatile regs that may contain sensitive kernel data */
144 * We don't need to restore AMR on the way back to userspace for KUAP.
145 * The value of AMR only matters while we're in the kernel.
152 b . /* prevent speculative execution */
/* Slow path: full GPR restore when syscall_exit_prepare requested it. */
154 .Lsyscall_vectored_\name\()_restore_regs:
171 .Lsyscall_vectored_\name\()_rst_end:
/* Restart handler: re-enter with irqs hard-disabled and redo the exit
 * preparation via syscall_exit_restart(), then retry the exit sequence. */
173 syscall_vectored_\name\()_restart:
174 _ASM_NOKPROBE_SYMBOL(syscall_vectored_\name\()_restart)
176 ld r1,PACA_EXIT_SAVE_R1(r13)
179 addi r4,r1,STACK_FRAME_OVERHEAD
180 li r11,IRQS_ALL_DISABLED
181 stb r11,PACAIRQSOFTMASK(r13)
182 bl syscall_exit_restart
183 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
184 b .Lsyscall_vectored_\name\()_rst_start
/* Register the restartable range with the soft-mask/restart tables so an
 * interrupt landing inside it is replayed via the restart label. */
187 SOFT_MASK_TABLE(.Lsyscall_vectored_\name\()_rst_start, 1b)
188 RESTART_TABLE(.Lsyscall_vectored_\name\()_rst_start, .Lsyscall_vectored_\name\()_rst_end, syscall_vectored_\name\()_restart)
192 system_call_vectored common 0x3000
195 * We instantiate another entry copy for the SIGILL variant, with TRAP=0x7ff0
196 * which is tested by system_call_exception when r0 is -1 (as set by vector
199 system_call_vectored sigill 0x7ff0
201 #endif /* CONFIG_PPC_BOOK3S */
/*
 * system_call_common_real: system-call entry taken while the MMU is off
 * (real mode).  Loads the kernel MSR from the paca; presumably it then
 * switches to virtual mode and falls through to system_call_common — the
 * mtmsrd/branch is elided in this excerpt, TODO confirm.
 */
203 .balign IFETCH_ALIGN_BYTES
204 .globl system_call_common_real
205 system_call_common_real:
206 _ASM_NOKPROBE_SYMBOL(system_call_common_real)
207 ld r10,PACAKMSR(r13) /* get MSR value for kernel */
/*
 * system_call_common: entry/exit path for the classic "sc" system call.
 * Builds the pt_regs frame, calls system_call_exception() and
 * syscall_exit_prepare() in C, then returns to userspace (fast path with
 * only volatile regs restored, slow path with full GPR restore).  The exit
 * sequence is restartable via the SOFT_MASK/RESTART tables below.
 * NOTE(review): this excerpt elides the label itself and many register
 * save/restore instructions; comments describe only visible lines.
 */
210 .balign IFETCH_ALIGN_BYTES
211 .globl system_call_common
213 _ASM_NOKPROBE_SYMBOL(system_call_common)
222 #ifdef CONFIG_PPC_E500
/* e500: flush the branch target buffer on kernel entry (Spectre v2). */
223 START_BTB_FLUSH_SECTION
225 END_BTB_FLUSH_SECTION
230 /* Save syscall parameters in r3-r8 */
232 /* Zero r9-r12, this should only be required when restoring all GPRs */
244 * This clears CR0.SO (bit 28), which is the error indication on
245 * return from this system call.
247 rldimi r12,r11,28,(63-28)
253 /* Calling convention has r3 = regs, r4 = orig r0 */
254 addi r3,r1,STACK_FRAME_OVERHEAD
256 LOAD_REG_IMMEDIATE(r11, STACK_FRAME_REGS_MARKER)
257 std r11,-16(r3) /* "regshere" marker */
259 #ifdef CONFIG_PPC_BOOK3S
/* Record that the SRR registers hold valid user return state. */
261 stb r11,PACASRR_VALID(r13)
265 * We always enter kernel from userspace with irq soft-mask enabled and
266 * nothing pending. system_call_exception() will call
267 * trace_hardirqs_off().
269 li r11,IRQS_ALL_DISABLED
270 stb r11,PACAIRQSOFTMASK(r13)
271 #ifdef CONFIG_PPC_BOOK3S
272 li r12,-1 /* Set MSR_EE and MSR_RI */
278 bl system_call_exception
281 addi r4,r1,STACK_FRAME_OVERHEAD
283 bl syscall_exit_prepare
284 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
285 #ifdef CONFIG_PPC_BOOK3S
/* Restartable section: any pending irq-happened bit other than HARD_DIS
 * means we must take the restart path instead of returning. */
287 lbz r11,PACAIRQHAPPENED(r13)
288 andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
292 stb r11,PACAIRQSOFTMASK(r13)
294 stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
300 #ifdef CONFIG_PPC_BOOK3S
301 lbz r4,PACASRR_VALID(r13)
305 stb r4,PACASRR_VALID(r13)
315 stdcx. r0,0,r1 /* to clear the reservation */
316 END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
319 bne .Lsyscall_restore_regs
320 /* Zero volatile regs that may contain sensitive kernel data */
325 .Lsyscall_restore_regs_cont:
329 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
332 * We don't need to restore AMR on the way back to userspace for KUAP.
333 * The value of AMR only matters while we're in the kernel.
340 b . /* prevent speculative execution */
/* Slow path: full GPR restore requested by syscall_exit_prepare. */
342 .Lsyscall_restore_regs:
350 b .Lsyscall_restore_regs_cont
353 #ifdef CONFIG_PPC_BOOK3S
/* Restart handler: redo exit preparation with irqs soft-disabled. */
355 _ASM_NOKPROBE_SYMBOL(syscall_restart)
357 ld r1,PACA_EXIT_SAVE_R1(r13)
360 addi r4,r1,STACK_FRAME_OVERHEAD
361 li r11,IRQS_ALL_DISABLED
362 stb r11,PACAIRQSOFTMASK(r13)
363 bl syscall_exit_restart
364 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
365 b .Lsyscall_rst_start
368 SOFT_MASK_TABLE(.Lsyscall_rst_start, 1b)
369 RESTART_TABLE(.Lsyscall_rst_start, .Lsyscall_rst_end, syscall_restart)
373 * If MSR EE/RI was never enabled, IRQs not reconciled, NVGPRs not
374 * touched, no exit work created, then this can be used.
/*
 * fast_interrupt_return_srr: abbreviated interrupt-return path usable only
 * under the conditions stated above.  Dispatches to the user or kernel
 * fast-return tail of the interrupt_return machinery below.
 * NOTE(review): the tests whose results the conditional branches consume
 * are elided in this excerpt — confirm the exact CR setup in full source.
 */
376 .balign IFETCH_ALIGN_BYTES
377 .globl fast_interrupt_return_srr
378 fast_interrupt_return_srr:
379 _ASM_NOKPROBE_SYMBOL(fast_interrupt_return_srr)
380 kuap_check_amr r3, r4
383 #ifdef CONFIG_PPC_BOOK3S
/* Returning to user: restore user AMR (KUAP) then take the user tail. */
385 kuap_user_restore r3, r4
386 b .Lfast_user_interrupt_return_srr
387 1: kuap_kernel_restore r3, r4
389 li r3,0 /* 0 return value, no EMULATE_STACK_STORE */
390 bne+ .Lfast_kernel_interrupt_return_srr
/* MSR[RI] was clear on a kernel return — unrecoverable; report and stop. */
391 addi r3,r1,STACK_FRAME_OVERHEAD
392 bl unrecoverable_exception
393 b . /* should not get here */
395 bne .Lfast_user_interrupt_return_srr
396 b .Lfast_kernel_interrupt_return_srr
/*
 * interrupt_return_macro srr
 *
 * Generates the full interrupt-return path for one pair of save/restore
 * registers (\srr = "srr" or "hsrr"): interrupt_return_<srr> dispatches to
 * a _user or _kernel variant, each of which calls the matching C exit
 * preparation, runs a restartable soft-mask check, and performs the final
 * register restore and return-from-interrupt.
 * NOTE(review): numerous instructions (register restores, the rfid/hrfid,
 * label definitions) are elided from this excerpt; comments cover only the
 * visible lines.
 */
399 .macro interrupt_return_macro srr
400 .balign IFETCH_ALIGN_BYTES
401 .globl interrupt_return_\srr
402 interrupt_return_\srr\():
403 _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\())
406 beq interrupt_return_\srr\()_kernel
407 interrupt_return_\srr\()_user: /* make backtraces match the _kernel variant */
408 _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user)
409 addi r3,r1,STACK_FRAME_OVERHEAD
410 bl interrupt_exit_user_prepare
/* Non-zero return from interrupt_exit_user_prepare => NVGPRs must be
 * restored before returning. */
412 bne- .Lrestore_nvgprs_\srr
413 .Lrestore_nvgprs_\srr\()_cont:
414 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
415 #ifdef CONFIG_PPC_BOOK3S
/* Restartable section (user return): restart if any irq-happened bit other
 * than HARD_DIS became pending. */
416 .Linterrupt_return_\srr\()_user_rst_start:
417 lbz r11,PACAIRQHAPPENED(r13)
418 andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
419 bne- interrupt_return_\srr\()_user_restart
422 stb r11,PACAIRQSOFTMASK(r13)
424 stb r11,PACAIRQHAPPENED(r13) # clear out possible HARD_DIS
426 .Lfast_user_interrupt_return_\srr\():
427 #ifdef CONFIG_PPC_BOOK3S
429 lbz r4,PACASRR_VALID(r13)
431 lbz r4,PACAHSRR_VALID(r13)
443 #ifdef CONFIG_PPC_BOOK3S
444 stb r4,PACASRR_VALID(r13)
450 #ifdef CONFIG_PPC_BOOK3S
451 stb r4,PACAHSRR_VALID(r13)
456 #ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
/* Debug: trap if returning to userspace with irqs not soft-enabled. */
457 lbz r4,PACAIRQSOFTMASK(r13)
458 tdnei r4,IRQS_ENABLED
464 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
467 stdcx. r0,0,r1 /* to clear the reservation */
470 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
493 b . /* prevent speculative execution */
494 .Linterrupt_return_\srr\()_user_rst_end:
496 .Lrestore_nvgprs_\srr\():
498 b .Lrestore_nvgprs_\srr\()_cont
500 #ifdef CONFIG_PPC_BOOK3S
/* Restart handler for the user-return restartable section. */
501 interrupt_return_\srr\()_user_restart:
502 _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_user_restart)
504 ld r1,PACA_EXIT_SAVE_R1(r13)
506 addi r3,r1,STACK_FRAME_OVERHEAD
507 li r11,IRQS_ALL_DISABLED
508 stb r11,PACAIRQSOFTMASK(r13)
509 bl interrupt_exit_user_restart
510 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
511 b .Linterrupt_return_\srr\()_user_rst_start
514 SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_user_rst_start, 1b)
515 RESTART_TABLE(.Linterrupt_return_\srr\()_user_rst_start, .Linterrupt_return_\srr\()_user_rst_end, interrupt_return_\srr\()_user_restart)
518 .balign IFETCH_ALIGN_BYTES
519 interrupt_return_\srr\()_kernel:
520 _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
521 addi r3,r1,STACK_FRAME_OVERHEAD
522 bl interrupt_exit_kernel_prepare
524 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
525 .Linterrupt_return_\srr\()_kernel_rst_start:
527 cmpwi r11,IRQS_ENABLED
528 stb r11,PACAIRQSOFTMASK(r13)
529 beq .Linterrupt_return_\srr\()_soft_enabled
532 * Returning to soft-disabled context.
533 * Check if a MUST_HARD_MASK interrupt has become pending, in which
534 * case we need to disable MSR[EE] in the return context.
538 beq .Lfast_kernel_interrupt_return_\srr\() // EE already disabled
539 lbz r11,PACAIRQHAPPENED(r13)
540 andi. r10,r11,PACA_IRQ_MUST_HARD_MASK
541 beq 1f // No HARD_MASK pending
543 /* Must clear MSR_EE from _MSR */
544 #ifdef CONFIG_PPC_BOOK3S
546 /* Clear valid before changing _MSR */
548 stb r10,PACASRR_VALID(r13)
550 stb r10,PACAHSRR_VALID(r13)
555 b .Lfast_kernel_interrupt_return_\srr\()
557 .Linterrupt_return_\srr\()_soft_enabled:
558 #ifdef CONFIG_PPC_BOOK3S
/* Restartable section (kernel return to soft-enabled context). */
559 lbz r11,PACAIRQHAPPENED(r13)
560 andi. r11,r11,(~PACA_IRQ_HARD_DIS)@l
561 bne- interrupt_return_\srr\()_kernel_restart
565 stb r11,PACAIRQHAPPENED(r13) // clear the possible HARD_DIS
567 .Lfast_kernel_interrupt_return_\srr\():
569 #ifdef CONFIG_PPC_BOOK3S
571 lbz r4,PACASRR_VALID(r13)
573 lbz r4,PACAHSRR_VALID(r13)
585 #ifdef CONFIG_PPC_BOOK3S
586 stb r4,PACASRR_VALID(r13)
592 #ifdef CONFIG_PPC_BOOK3S
593 stb r4,PACAHSRR_VALID(r13)
599 stdcx. r0,0,r1 /* to clear the reservation */
602 ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
617 * Leaving a stale STACK_FRAME_REGS_MARKER on the stack can confuse
618 * the reliable stack unwinder later on. Clear it.
620 std r0,STACK_FRAME_OVERHEAD-16(r1)
624 bne- cr1,1f /* emulate stack store */
634 b . /* prevent speculative execution */
637 * Emulate stack store with update. New r1 value was already calculated
638 * and updated in our interrupt regs by emulate_loadstore, but we can't
639 * store the previous value of r1 to the stack before re-loading our
640 * registers from it, otherwise they could be clobbered. Use
641 * PACA_EXGEN as temporary storage to hold the store data, as
642 * interrupts are disabled here so it won't be clobbered.
645 std r9,PACA_EXGEN+0(r13)
646 addi r9,r1,INT_FRAME_SIZE /* get original r1 */
650 std r9,0(r1) /* perform store component of stdu */
651 ld r9,PACA_EXGEN+0(r13)
658 b . /* prevent speculative execution */
659 .Linterrupt_return_\srr\()_kernel_rst_end:
661 #ifdef CONFIG_PPC_BOOK3S
/* Restart handler for the kernel-return restartable section. */
662 interrupt_return_\srr\()_kernel_restart:
663 _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel_restart)
665 ld r1,PACA_EXIT_SAVE_R1(r13)
667 addi r3,r1,STACK_FRAME_OVERHEAD
668 li r11,IRQS_ALL_DISABLED
669 stb r11,PACAIRQSOFTMASK(r13)
670 bl interrupt_exit_kernel_restart
671 std r1,PACA_EXIT_SAVE_R1(r13) /* save r1 for restart */
672 b .Linterrupt_return_\srr\()_kernel_rst_start
675 SOFT_MASK_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, 1b)
676 RESTART_TABLE(.Linterrupt_return_\srr\()_kernel_rst_start, .Linterrupt_return_\srr\()_kernel_rst_end, interrupt_return_\srr\()_kernel_restart)
/* Instantiate the return path for SRR (all platforms) and, on Book3S,
 * additionally for HSRR (hypervisor interrupts). */
681 interrupt_return_macro srr
682 #ifdef CONFIG_PPC_BOOK3S
683 interrupt_return_macro hsrr
/* Fixed symbol marking the end of the soft-masked text region; code below
 * this point is not subject to soft-mask replay. */
685 .globl __end_soft_masked
687 DEFINE_FIXED_SYMBOL(__end_soft_masked, text)
688 #endif /* CONFIG_PPC_BOOK3S */
690 #ifdef CONFIG_PPC_BOOK3S
/*
 * ret_from_fork_scv: first return to userspace of a child created while the
 * parent was inside an scv system call.  The child returns 0 from fork()
 * and exits through the common scv syscall-exit path.
 * NOTE(review): the schedule_tail call presumably made before setting r3 is
 * elided in this excerpt — confirm against the full source.
 */
691 _GLOBAL(ret_from_fork_scv)
694 li r3,0 /* fork() return value */
695 b .Lsyscall_vectored_common_exit
/* ret_from_fork: same as above for the classic "sc" syscall path; the
 * branch to the sc exit path is elided in this excerpt. */
698 _GLOBAL(ret_from_fork)
701 li r3,0 /* fork() return value */
704 _GLOBAL(ret_from_kernel_thread)
709 #ifdef CONFIG_PPC64_ELF_ABI_V2