1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Derived from "arch/i386/kernel/process.c"
4 * Copyright (C) 1995 Linus Torvalds
6 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
7 * Paul Mackerras (paulus@cs.anu.edu.au)
10 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
13 #include <linux/errno.h>
14 #include <linux/sched.h>
15 #include <linux/sched/debug.h>
16 #include <linux/sched/task.h>
17 #include <linux/sched/task_stack.h>
18 #include <linux/kernel.h>
20 #include <linux/smp.h>
21 #include <linux/stddef.h>
22 #include <linux/unistd.h>
23 #include <linux/ptrace.h>
24 #include <linux/slab.h>
25 #include <linux/user.h>
26 #include <linux/elf.h>
27 #include <linux/prctl.h>
28 #include <linux/init_task.h>
29 #include <linux/export.h>
30 #include <linux/kallsyms.h>
31 #include <linux/mqueue.h>
32 #include <linux/hardirq.h>
33 #include <linux/utsname.h>
34 #include <linux/ftrace.h>
35 #include <linux/kernel_stat.h>
36 #include <linux/personality.h>
37 #include <linux/random.h>
38 #include <linux/hw_breakpoint.h>
39 #include <linux/uaccess.h>
40 #include <linux/elf-randomize.h>
41 #include <linux/pkeys.h>
42 #include <linux/seq_buf.h>
45 #include <asm/processor.h>
48 #include <asm/machdep.h>
50 #include <asm/runlatch.h>
51 #include <asm/syscalls.h>
52 #include <asm/switch_to.h>
54 #include <asm/debug.h>
56 #include <asm/firmware.h>
57 #include <asm/hw_irq.h>
59 #include <asm/code-patching.h>
61 #include <asm/livepatch.h>
62 #include <asm/cpu_has_feature.h>
63 #include <asm/asm-prototypes.h>
64 #include <asm/stacktrace.h>
65 #include <asm/hw_breakpoint.h>
67 #include <linux/kprobes.h>
68 #include <linux/kdebug.h>
70 /* Transactional Memory debug */
72 #define TM_DEBUG(x...) printk(KERN_INFO x)
74 #define TM_DEBUG(x...) do { } while(0)
77 extern unsigned long _get_SP(void);
79 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
81 * Are we running in "Suspend disabled" mode? If so we have to block any
82 * sigreturn that would get us into suspended state, and we also warn in some
83 * other paths that we should never reach with suspend disabled.
85 bool tm_suspend_disabled __ro_after_init = false;
87 static void check_if_tm_restore_required(struct task_struct *tsk)
90 * If we are saving the current thread's registers, and the
91 * thread is in a transactional state, set the TIF_RESTORE_TM
92 * bit so that we know to restore the registers before
93 * returning to userspace.
95 if (tsk == current && tsk->thread.regs &&
96 MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
97 !test_thread_flag(TIF_RESTORE_TM)) {
98 tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
99 set_thread_flag(TIF_RESTORE_TM);
104 static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
105 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
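/*
 * Illustrative sketch only (not part of the original file): the
 * MSR_TM_ACTIVE() test above keys off the MSR[TS] ("transaction state")
 * field.  Assuming the helpers in <asm/reg.h> on 64-bit Book3S builds,
 * for the architecturally valid TS encodings it is equivalent to asking
 * whether the thread is either transactional or suspended:
 */
static inline bool example_msr_tm_active(unsigned long msr)
{
	return MSR_TM_TRANSACTIONAL(msr) || MSR_TM_SUSPENDED(msr);
}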
107 bool strict_msr_control;
108 EXPORT_SYMBOL(strict_msr_control);
110 static int __init enable_strict_msr_control(char *str)
112 strict_msr_control = true;
113 pr_info("Enabling strict facility control\n");
117 early_param("ppc_strict_facility_enable", enable_strict_msr_control);
119 /* notrace because it's called by restore_math */
120 unsigned long notrace msr_check_and_set(unsigned long bits)
122 unsigned long oldmsr = mfmsr();
123 unsigned long newmsr;
125 newmsr = oldmsr | bits;
128 if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
132 if (oldmsr != newmsr)
137 EXPORT_SYMBOL_GPL(msr_check_and_set);
139 /* notrace because it's called by restore_math */
140 void notrace __msr_check_and_clear(unsigned long bits)
142 unsigned long oldmsr = mfmsr();
143 unsigned long newmsr;
145 newmsr = oldmsr & ~bits;
148 if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
152 if (oldmsr != newmsr)
155 EXPORT_SYMBOL(__msr_check_and_clear);
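/*
 * Usage note (illustration only, not part of the original file): the
 * leading underscore matters.  The msr_check_and_clear() called
 * elsewhere in this file is a small wrapper in <asm/switch_to.h> that,
 * roughly, only performs the clear when strict facility control was
 * requested at boot with the "ppc_strict_facility_enable" command-line
 * parameter handled above:
 *
 *	static inline void msr_check_and_clear(unsigned long bits)
 *	{
 *		if (strict_msr_control)
 *			__msr_check_and_clear(bits);
 *	}
 *
 * In the default (non-strict) mode facilities are simply left enabled,
 * saving an MSR write on every giveup.
 */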
157 #ifdef CONFIG_PPC_FPU
158 static void __giveup_fpu(struct task_struct *tsk)
163 msr = tsk->thread.regs->msr;
164 msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);
166 if (cpu_has_feature(CPU_FTR_VSX))
169 tsk->thread.regs->msr = msr;
172 void giveup_fpu(struct task_struct *tsk)
174 check_if_tm_restore_required(tsk);
176 msr_check_and_set(MSR_FP);
178 msr_check_and_clear(MSR_FP);
180 EXPORT_SYMBOL(giveup_fpu);
183 * Make sure the floating-point register state in the
184 * thread_struct is up to date for task tsk.
186 void flush_fp_to_thread(struct task_struct *tsk)
188 if (tsk->thread.regs) {
190 * We need to disable preemption here because if we didn't,
191 * another process could get scheduled after the regs->msr
192 * test but before we have finished saving the FP registers
193 * to the thread_struct. That process could take over the
194 * FPU, and then when we get scheduled again we would store
195 * bogus values for the remaining FP registers.
198 if (tsk->thread.regs->msr & MSR_FP) {
200 * This should only ever be called for current or
201 * for a stopped child process. Since we save away
202 * the FP register state on context switch,
203 * there is something wrong if a stopped child appears
204 * to still have its FP state in the CPU registers.
206 BUG_ON(tsk != current);
212 EXPORT_SYMBOL_GPL(flush_fp_to_thread);
214 void enable_kernel_fp(void)
216 unsigned long cpumsr;
218 WARN_ON(preemptible());
220 cpumsr = msr_check_and_set(MSR_FP);
222 if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
223 check_if_tm_restore_required(current);
225 * If a thread has already been reclaimed then the
226 * checkpointed registers are on the CPU but have definitely
227 * been saved by the reclaim code. Don't need to and *cannot*
228 * giveup as this would save to the 'live' structure not the
229 * checkpointed structure.
231 if (!MSR_TM_ACTIVE(cpumsr) &&
232 MSR_TM_ACTIVE(current->thread.regs->msr))
234 __giveup_fpu(current);
237 EXPORT_SYMBOL(enable_kernel_fp);
238 #endif /* CONFIG_PPC_FPU */
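/*
 * Illustrative sketch only (not part of the original file): a typical
 * in-kernel FP section built on the helpers above.  enable_kernel_fp()
 * must run with preemption disabled and be paired with
 * disable_kernel_fp() (an <asm/switch_to.h> wrapper around
 * msr_check_and_clear(MSR_FP)) so that user FP state given up via
 * __giveup_fpu() cannot be clobbered by a preempting task.
 */
static __maybe_unused void example_kernel_fp_section(void)
{
	preempt_disable();
	enable_kernel_fp();
	/* ... kernel code may issue FP instructions here ... */
	disable_kernel_fp();
	preempt_enable();
}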
240 #ifdef CONFIG_ALTIVEC
241 static void __giveup_altivec(struct task_struct *tsk)
246 msr = tsk->thread.regs->msr;
249 if (cpu_has_feature(CPU_FTR_VSX))
252 tsk->thread.regs->msr = msr;
255 void giveup_altivec(struct task_struct *tsk)
257 check_if_tm_restore_required(tsk);
259 msr_check_and_set(MSR_VEC);
260 __giveup_altivec(tsk);
261 msr_check_and_clear(MSR_VEC);
263 EXPORT_SYMBOL(giveup_altivec);
265 void enable_kernel_altivec(void)
267 unsigned long cpumsr;
269 WARN_ON(preemptible());
271 cpumsr = msr_check_and_set(MSR_VEC);
273 if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
274 check_if_tm_restore_required(current);
276 * If a thread has already been reclaimed then the
277 * checkpointed registers are on the CPU but have definitely
278 * been saved by the reclaim code. Don't need to and *cannot*
279 * giveup as this would save to the 'live' structure not the
280 * checkpointed structure.
282 if (!MSR_TM_ACTIVE(cpumsr) &&
283 MSR_TM_ACTIVE(current->thread.regs->msr))
285 __giveup_altivec(current);
288 EXPORT_SYMBOL(enable_kernel_altivec);
291 * Make sure the VMX/Altivec register state in the
292 * thread_struct is up to date for task tsk.
294 void flush_altivec_to_thread(struct task_struct *tsk)
296 if (tsk->thread.regs) {
298 if (tsk->thread.regs->msr & MSR_VEC) {
299 BUG_ON(tsk != current);
305 EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
306 #endif /* CONFIG_ALTIVEC */
309 static void __giveup_vsx(struct task_struct *tsk)
311 unsigned long msr = tsk->thread.regs->msr;
314 * We should never be setting MSR_VSX without also setting
317 WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP) && (msr & MSR_VEC)));
319 /* __giveup_fpu will clear MSR_VSX */
323 __giveup_altivec(tsk);
326 static void giveup_vsx(struct task_struct *tsk)
328 check_if_tm_restore_required(tsk);
330 msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
332 msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
335 void enable_kernel_vsx(void)
337 unsigned long cpumsr;
339 WARN_ON(preemptible());
341 cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
343 if (current->thread.regs &&
344 (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
345 check_if_tm_restore_required(current);
347 * If a thread has already been reclaimed then the
348 * checkpointed registers are on the CPU but have definitely
349 * been saved by the reclaim code. Don't need to and *cannot*
350 * giveup as this would save to the 'live' structure not the
351 * checkpointed structure.
353 if (!MSR_TM_ACTIVE(cpumsr) &&
354 MSR_TM_ACTIVE(current->thread.regs->msr))
356 __giveup_vsx(current);
359 EXPORT_SYMBOL(enable_kernel_vsx);
361 void flush_vsx_to_thread(struct task_struct *tsk)
363 if (tsk->thread.regs) {
365 if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
366 BUG_ON(tsk != current);
372 EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
373 #endif /* CONFIG_VSX */
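/*
 * Illustrative sketch only (not part of the original file): kernel VSX
 * use follows the same pattern.  Note that enable_kernel_vsx() sets
 * MSR_FP | MSR_VEC | MSR_VSX together, matching the invariant warned
 * about in __giveup_vsx(): MSR_VSX is never valid without both MSR_FP
 * and MSR_VEC.
 */
static __maybe_unused void example_kernel_vsx_section(void)
{
	preempt_disable();
	enable_kernel_vsx();
	/* ... VSX instructions may be used here ... */
	disable_kernel_vsx();
	preempt_enable();
}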
376 void giveup_spe(struct task_struct *tsk)
378 check_if_tm_restore_required(tsk);
380 msr_check_and_set(MSR_SPE);
382 msr_check_and_clear(MSR_SPE);
384 EXPORT_SYMBOL(giveup_spe);
386 void enable_kernel_spe(void)
388 WARN_ON(preemptible());
390 msr_check_and_set(MSR_SPE);
392 if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
393 check_if_tm_restore_required(current);
394 __giveup_spe(current);
397 EXPORT_SYMBOL(enable_kernel_spe);
399 void flush_spe_to_thread(struct task_struct *tsk)
401 if (tsk->thread.regs) {
403 if (tsk->thread.regs->msr & MSR_SPE) {
404 BUG_ON(tsk != current);
405 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
411 #endif /* CONFIG_SPE */
413 static unsigned long msr_all_available;
415 static int __init init_msr_all_available(void)
417 #ifdef CONFIG_PPC_FPU
418 msr_all_available |= MSR_FP;
420 #ifdef CONFIG_ALTIVEC
421 if (cpu_has_feature(CPU_FTR_ALTIVEC))
422 msr_all_available |= MSR_VEC;
425 if (cpu_has_feature(CPU_FTR_VSX))
426 msr_all_available |= MSR_VSX;
429 if (cpu_has_feature(CPU_FTR_SPE))
430 msr_all_available |= MSR_SPE;
435 early_initcall(init_msr_all_available);
437 void giveup_all(struct task_struct *tsk)
439 unsigned long usermsr;
441 if (!tsk->thread.regs)
444 check_if_tm_restore_required(tsk);
446 usermsr = tsk->thread.regs->msr;
448 if ((usermsr & msr_all_available) == 0)
451 msr_check_and_set(msr_all_available);
453 WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
455 #ifdef CONFIG_PPC_FPU
456 if (usermsr & MSR_FP)
459 #ifdef CONFIG_ALTIVEC
460 if (usermsr & MSR_VEC)
461 __giveup_altivec(tsk);
464 if (usermsr & MSR_SPE)
468 msr_check_and_clear(msr_all_available);
470 EXPORT_SYMBOL(giveup_all);
472 #ifdef CONFIG_PPC_BOOK3S_64
473 #ifdef CONFIG_PPC_FPU
474 static bool should_restore_fp(void)
476 if (current->thread.load_fp) {
477 current->thread.load_fp++;
483 static void do_restore_fp(void)
485 load_fp_state(&current->thread.fp_state);
488 static bool should_restore_fp(void) { return false; }
489 static void do_restore_fp(void) { }
490 #endif /* CONFIG_PPC_FPU */
492 #ifdef CONFIG_ALTIVEC
493 static bool should_restore_altivec(void)
495 if (cpu_has_feature(CPU_FTR_ALTIVEC) && (current->thread.load_vec)) {
496 current->thread.load_vec++;
502 static void do_restore_altivec(void)
504 load_vr_state(&current->thread.vr_state);
505 current->thread.used_vr = 1;
508 static bool should_restore_altivec(void) { return false; }
509 static void do_restore_altivec(void) { }
510 #endif /* CONFIG_ALTIVEC */
513 static bool should_restore_vsx(void)
515 if (cpu_has_feature(CPU_FTR_VSX))
519 static void do_restore_vsx(void)
521 current->thread.used_vsr = 1;
524 static bool should_restore_vsx(void) { return false; }
525 static void do_restore_vsx(void) { }
526 #endif /* CONFIG_VSX */
529 * The exception exit path calls restore_math() with interrupts hard disabled
530 * but the soft irq state not "reconciled". ftrace code that calls
531 * local_irq_save/restore causes warnings.
533 * Rather than complicate the exit path, just don't trace restore_math. This
534 * could be done by having ftrace entry code check for this un-reconciled
535 * condition where MSR[EE]=0 and PACA_IRQ_HARD_DIS is not set, and
536 * temporarily fix it up for the duration of the ftrace call.
538 void notrace restore_math(struct pt_regs *regs)
541 unsigned long new_msr = 0;
546 * new_msr tracks the facilities that are to be restored. Only reload
547 * if the bit is not set in the user MSR (if it is set, the registers
548 * are live for the user thread).
550 if ((!(msr & MSR_FP)) && should_restore_fp())
551 new_msr |= MSR_FP | current->thread.fpexc_mode;
553 if ((!(msr & MSR_VEC)) && should_restore_altivec())
556 if ((!(msr & MSR_VSX)) && should_restore_vsx()) {
557 if (((msr | new_msr) & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC))
562 msr_check_and_set(new_msr);
564 if (new_msr & MSR_FP)
567 if (new_msr & MSR_VEC)
568 do_restore_altivec();
570 if (new_msr & MSR_VSX)
573 msr_check_and_clear(new_msr);
575 regs->msr |= new_msr;
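/*
 * Worked example for restore_math() above (illustration only): suppose
 * the user MSR still has MSR_VEC live but FP was flushed and
 * should_restore_fp() reports FP is wanted again.  new_msr then gains
 * MSR_FP (plus the thread's fpexc_mode bits), and because
 * (msr | new_msr) now contains both MSR_FP and MSR_VEC, MSR_VSX is also
 * added on CPU_FTR_VSX machines, so all three facilities come back
 * together.
 */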
580 static void save_all(struct task_struct *tsk)
582 unsigned long usermsr;
584 if (!tsk->thread.regs)
587 usermsr = tsk->thread.regs->msr;
589 if ((usermsr & msr_all_available) == 0)
592 msr_check_and_set(msr_all_available);
594 WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
596 if (usermsr & MSR_FP)
599 if (usermsr & MSR_VEC)
602 if (usermsr & MSR_SPE)
605 msr_check_and_clear(msr_all_available);
606 thread_pkey_regs_save(&tsk->thread);
609 void flush_all_to_thread(struct task_struct *tsk)
611 if (tsk->thread.regs) {
613 BUG_ON(tsk != current);
615 if (tsk->thread.regs->msr & MSR_SPE)
616 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
623 EXPORT_SYMBOL(flush_all_to_thread);
625 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
626 void do_send_trap(struct pt_regs *regs, unsigned long address,
627 unsigned long error_code, int breakpt)
629 current->thread.trap_nr = TRAP_HWBKPT;
630 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
631 11, SIGSEGV) == NOTIFY_STOP)
634 /* Deliver the signal to userspace */
635 force_sig_ptrace_errno_trap(breakpt, /* breakpoint or watchpoint id */
636 (void __user *)address);
638 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
639 void do_break (struct pt_regs *regs, unsigned long address,
640 unsigned long error_code)
642 current->thread.trap_nr = TRAP_HWBKPT;
643 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
644 11, SIGSEGV) == NOTIFY_STOP)
647 if (debugger_break_match(regs))
650 /* Deliver the signal to userspace */
651 force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)address);
653 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
655 static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk[HBP_NUM_MAX]);
657 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
659 * Set the debug registers back to their default "safe" values.
661 static void set_debug_reg_defaults(struct thread_struct *thread)
663 thread->debug.iac1 = thread->debug.iac2 = 0;
664 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
665 thread->debug.iac3 = thread->debug.iac4 = 0;
667 thread->debug.dac1 = thread->debug.dac2 = 0;
668 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
669 thread->debug.dvc1 = thread->debug.dvc2 = 0;
671 thread->debug.dbcr0 = 0;
674 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
676 thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
677 DBCR1_IAC3US | DBCR1_IAC4US;
679 * Force Data Address Compare User/Supervisor bits to be User-only
680 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
682 thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
684 thread->debug.dbcr1 = 0;
688 static void prime_debug_regs(struct debug_reg *debug)
691 * We could have inherited MSR_DE from userspace, since
692 * it doesn't get cleared on exception entry. Make sure
693 * MSR_DE is clear before we enable any debug events.
695 mtmsr(mfmsr() & ~MSR_DE);
697 mtspr(SPRN_IAC1, debug->iac1);
698 mtspr(SPRN_IAC2, debug->iac2);
699 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
700 mtspr(SPRN_IAC3, debug->iac3);
701 mtspr(SPRN_IAC4, debug->iac4);
703 mtspr(SPRN_DAC1, debug->dac1);
704 mtspr(SPRN_DAC2, debug->dac2);
705 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
706 mtspr(SPRN_DVC1, debug->dvc1);
707 mtspr(SPRN_DVC2, debug->dvc2);
709 mtspr(SPRN_DBCR0, debug->dbcr0);
710 mtspr(SPRN_DBCR1, debug->dbcr1);
712 mtspr(SPRN_DBCR2, debug->dbcr2);
716 * If either the old or the new thread is using the debug registers,
717 * set the debug registers from the values stored in the new
718 * thread.
720 void switch_booke_debug_regs(struct debug_reg *new_debug)
722 if ((current->thread.debug.dbcr0 & DBCR0_IDM)
723 || (new_debug->dbcr0 & DBCR0_IDM))
724 prime_debug_regs(new_debug);
726 EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
727 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
728 #ifndef CONFIG_HAVE_HW_BREAKPOINT
729 static void set_breakpoint(int i, struct arch_hw_breakpoint *brk)
732 __set_breakpoint(i, brk);
736 static void set_debug_reg_defaults(struct thread_struct *thread)
739 struct arch_hw_breakpoint null_brk = {0};
741 for (i = 0; i < nr_wp_slots(); i++) {
742 thread->hw_brk[i] = null_brk;
743 if (ppc_breakpoint_available())
744 set_breakpoint(i, &thread->hw_brk[i]);
748 static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
749 struct arch_hw_breakpoint *b)
751 if (a->address != b->address)
753 if (a->type != b->type)
755 if (a->len != b->len)
757 /* no need to check hw_len. it's calculated from address and len */
761 static void switch_hw_breakpoint(struct task_struct *new)
765 for (i = 0; i < nr_wp_slots(); i++) {
766 if (likely(hw_brk_match(this_cpu_ptr(&current_brk[i]),
767 &new->thread.hw_brk[i])))
770 __set_breakpoint(i, &new->thread.hw_brk[i]);
773 #endif /* !CONFIG_HAVE_HW_BREAKPOINT */
774 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
776 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
777 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
779 mtspr(SPRN_DAC1, dabr);
780 #ifdef CONFIG_PPC_47x
785 #elif defined(CONFIG_PPC_BOOK3S)
786 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
788 mtspr(SPRN_DABR, dabr);
789 if (cpu_has_feature(CPU_FTR_DABRX))
790 mtspr(SPRN_DABRX, dabrx);
794 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
800 static inline int set_dabr(struct arch_hw_breakpoint *brk)
802 unsigned long dabr, dabrx;
804 dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
805 dabrx = ((brk->type >> 3) & 0x7);
808 return ppc_md.set_dabr(dabr, dabrx);
810 return __set_dabr(dabr, dabrx);
813 static inline int set_breakpoint_8xx(struct arch_hw_breakpoint *brk)
815 unsigned long lctrl1 = LCTRL1_CTE_GT | LCTRL1_CTF_LT | LCTRL1_CRWE_RW |
817 unsigned long lctrl2 = LCTRL2_LW0EN | LCTRL2_LW0LADC | LCTRL2_SLW0EN;
818 unsigned long start_addr = ALIGN_DOWN(brk->address, HW_BREAKPOINT_SIZE);
819 unsigned long end_addr = ALIGN(brk->address + brk->len, HW_BREAKPOINT_SIZE);
822 lctrl2 |= LCTRL2_LW0LA_F;
823 else if (end_addr == 0)
824 lctrl2 |= LCTRL2_LW0LA_E;
826 lctrl2 |= LCTRL2_LW0LA_EandF;
828 mtspr(SPRN_LCTRL2, 0);
830 if ((brk->type & HW_BRK_TYPE_RDWR) == 0)
833 if ((brk->type & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_READ)
834 lctrl1 |= LCTRL1_CRWE_RO | LCTRL1_CRWF_RO;
835 if ((brk->type & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_WRITE)
836 lctrl1 |= LCTRL1_CRWE_WO | LCTRL1_CRWF_WO;
838 mtspr(SPRN_CMPE, start_addr - 1);
839 mtspr(SPRN_CMPF, end_addr);
840 mtspr(SPRN_LCTRL1, lctrl1);
841 mtspr(SPRN_LCTRL2, lctrl2);
846 void __set_breakpoint(int nr, struct arch_hw_breakpoint *brk)
848 memcpy(this_cpu_ptr(&current_brk[nr]), brk, sizeof(*brk));
853 else if (IS_ENABLED(CONFIG_PPC_8xx))
854 set_breakpoint_8xx(brk);
855 else if (!cpu_has_feature(CPU_FTR_ARCH_207S))
859 // Shouldn't happen due to higher level checks
863 /* Check if we have DAWR or DABR hardware */
864 bool ppc_breakpoint_available(void)
867 return true; /* POWER8 DAWR or POWER9 forced DAWR */
868 if (cpu_has_feature(CPU_FTR_ARCH_207S))
869 return false; /* POWER9 with DAWR disabled */
870 /* DABR: Everything but POWER8 and POWER9 */
873 EXPORT_SYMBOL_GPL(ppc_breakpoint_available);
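/*
 * Illustrative sketch only (not part of the original file): code that
 * wants to arm a hardware data breakpoint is expected to probe for the
 * facility first, roughly as in the hypothetical helper below.
 */
static __maybe_unused int example_try_arm_breakpoint(int nr,
						     struct arch_hw_breakpoint *brk)
{
	if (!ppc_breakpoint_available())
		return -ENODEV;		/* e.g. POWER9 with DAWR disabled */

	__set_breakpoint(nr, brk);
	return 0;
}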
875 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
877 static inline bool tm_enabled(struct task_struct *tsk)
879 return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM);
882 static void tm_reclaim_thread(struct thread_struct *thr, uint8_t cause)
885 * Use the current MSR TM suspended bit to track if we have
886 * checkpointed state outstanding.
887 * On signal delivery, we'd normally reclaim the checkpointed
888 * state to obtain the stack pointer (see: get_tm_stackpointer()).
889 * This will then directly return to userspace without going
890 * through __switch_to(). However, if the stack frame is bad,
891 * we need to exit this thread which calls __switch_to() which
892 * will again attempt to reclaim the already saved tm state.
893 * Hence we need to check that we've not already reclaimed
895 * We do this using the current MSR, rather tracking it in
896 * some specific thread_struct bit, as it has the additional
897 * benefit of checking for a potential TM bad thing exception.
899 if (!MSR_TM_SUSPENDED(mfmsr()))
902 giveup_all(container_of(thr, struct task_struct, thread));
904 tm_reclaim(thr, cause);
907 * If we are in a transaction and FP is off then we can't have
908 * used FP inside that transaction. Hence the checkpointed
909 * state is the same as the live state. We need to copy the
910 * live state to the checkpointed state so that when the
911 * transaction is restored, the checkpointed state is correct
912 * and the aborted transaction sees the correct state. We use
913 * ckpt_regs.msr here as that's what tm_reclaim will use to
914 * determine if it's going to write the checkpointed state or
915 * not. So either this will write the checkpointed registers,
916 * or reclaim will. Similarly for VMX.
918 if ((thr->ckpt_regs.msr & MSR_FP) == 0)
919 memcpy(&thr->ckfp_state, &thr->fp_state,
920 sizeof(struct thread_fp_state));
921 if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
922 memcpy(&thr->ckvr_state, &thr->vr_state,
923 sizeof(struct thread_vr_state));
926 void tm_reclaim_current(uint8_t cause)
929 tm_reclaim_thread(&current->thread, cause);
932 static inline void tm_reclaim_task(struct task_struct *tsk)
934 /* We have to work out if we're switching from/to a task that's in the
935 * middle of a transaction.
937 * In switching we need to maintain a 2nd register state as
938 * oldtask->thread.ckpt_regs. We tm_reclaim(oldproc); this saves the
939 * checkpointed (tbegin) state in ckpt_regs, ckfp_state and
942 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
944 struct thread_struct *thr = &tsk->thread;
949 if (!MSR_TM_ACTIVE(thr->regs->msr))
950 goto out_and_saveregs;
952 WARN_ON(tm_suspend_disabled);
954 TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
955 "ccr=%lx, msr=%lx, trap=%lx)\n",
956 tsk->pid, thr->regs->nip,
957 thr->regs->ccr, thr->regs->msr,
960 tm_reclaim_thread(thr, TM_CAUSE_RESCHED);
962 TM_DEBUG("--- tm_reclaim on pid %d complete\n",
966 /* Always save the regs here, even if a transaction's not active.
967 * This context-switches a thread's TM info SPRs. We do it here to
968 * be consistent with the restore path (in recheckpoint) which
969 * cannot happen later in _switch().
974 extern void __tm_recheckpoint(struct thread_struct *thread);
976 void tm_recheckpoint(struct thread_struct *thread)
980 if (!(thread->regs->msr & MSR_TM))
983 /* We really can't be interrupted here as the TEXASR registers can't
984 * change and later in the trecheckpoint code, we have a userspace R1.
985 * So let's hard disable over this region.
987 local_irq_save(flags);
990 /* The TM SPRs are restored here, so that TEXASR.FS can be set
991 * before the trecheckpoint and no explosion occurs.
993 tm_restore_sprs(thread);
995 __tm_recheckpoint(thread);
997 local_irq_restore(flags);
1000 static inline void tm_recheckpoint_new_task(struct task_struct *new)
1002 if (!cpu_has_feature(CPU_FTR_TM))
1005 /* Recheckpoint the registers of the thread we're about to switch to.
1007 * If the task was using FP, we non-lazily reload both the original and
1008 * the speculative FP register states. This is because the kernel
1009 * doesn't see if/when a TM rollback occurs, so if we take an FP
1010 * unavailable later, we are unable to determine which set of FP regs
1011 * need to be restored.
1013 if (!tm_enabled(new))
1016 if (!MSR_TM_ACTIVE(new->thread.regs->msr)) {
1017 tm_restore_sprs(&new->thread);
1020 /* Recheckpoint to restore original checkpointed register state. */
1021 TM_DEBUG("*** tm_recheckpoint of pid %d (new->msr 0x%lx)\n",
1022 new->pid, new->thread.regs->msr);
1024 tm_recheckpoint(&new->thread);
1027 * The checkpointed state has been restored but the live state has
1028 * not, ensure all the math functionality is turned off to trigger
1029 * restore_math() to reload.
1031 new->thread.regs->msr &= ~(MSR_FP | MSR_VEC | MSR_VSX);
1033 TM_DEBUG("*** tm_recheckpoint of pid %d complete "
1034 "(kernel msr 0x%lx)\n",
1038 static inline void __switch_to_tm(struct task_struct *prev,
1039 struct task_struct *new)
1041 if (cpu_has_feature(CPU_FTR_TM)) {
1042 if (tm_enabled(prev) || tm_enabled(new))
1045 if (tm_enabled(prev)) {
1046 prev->thread.load_tm++;
1047 tm_reclaim_task(prev);
1048 if (!MSR_TM_ACTIVE(prev->thread.regs->msr) && prev->thread.load_tm == 0)
1049 prev->thread.regs->msr &= ~MSR_TM;
1052 tm_recheckpoint_new_task(new);
1057 * This is called if we are on the way out to userspace and the
1058 * TIF_RESTORE_TM flag is set. It checks if we need to reload
1059 * FP and/or vector state and does so if necessary.
1060 * If userspace is inside a transaction (whether active or
1061 * suspended) and FP/VMX/VSX instructions have ever been enabled
1062 * inside that transaction, then we have to keep them enabled
1063 * and keep the FP/VMX/VSX state loaded for as long as the transaction
1064 * continues. The reason is that if we didn't, and subsequently
1065 * got a FP/VMX/VSX unavailable interrupt inside a transaction,
1066 * we don't know whether it's the same transaction, and thus we
1067 * don't know which of the checkpointed state and the transactional
1070 void restore_tm_state(struct pt_regs *regs)
1072 unsigned long msr_diff;
1075 * This is the only moment we should clear TIF_RESTORE_TM as
1076 * it is here that ckpt_regs.msr and pt_regs.msr become the same
1077 * again; anything else could lead to an incorrect ckpt_msr being
1078 * saved and therefore incorrect signal contexts.
1080 clear_thread_flag(TIF_RESTORE_TM);
1081 if (!MSR_TM_ACTIVE(regs->msr))
1084 msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
1085 msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
1087 /* Ensure that restore_math() will restore */
1088 if (msr_diff & MSR_FP)
1089 current->thread.load_fp = 1;
1090 #ifdef CONFIG_ALTIVEC
1091 if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
1092 current->thread.load_vec = 1;
1096 regs->msr |= msr_diff;
1100 #define tm_recheckpoint_new_task(new)
1101 #define __switch_to_tm(prev, new)
1102 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
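/*
 * Worked example for restore_tm_state() above (illustration only): if
 * the checkpointed MSR (ckpt_regs.msr) had MSR_FP | MSR_VEC enabled but
 * the live regs->msr only has MSR_VEC, then
 *
 *	msr_diff = (ckpt_msr & ~regs->msr) & (MSR_FP | MSR_VEC | MSR_VSX)
 *	         = MSR_FP,
 *
 * so load_fp is set and MSR_FP is turned back on in regs->msr before
 * returning to the still-transactional user context.
 */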
1104 static inline void save_sprs(struct thread_struct *t)
1106 #ifdef CONFIG_ALTIVEC
1107 if (cpu_has_feature(CPU_FTR_ALTIVEC))
1108 t->vrsave = mfspr(SPRN_VRSAVE);
1110 #ifdef CONFIG_PPC_BOOK3S_64
1111 if (cpu_has_feature(CPU_FTR_DSCR))
1112 t->dscr = mfspr(SPRN_DSCR);
1114 if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
1115 t->bescr = mfspr(SPRN_BESCR);
1116 t->ebbhr = mfspr(SPRN_EBBHR);
1117 t->ebbrr = mfspr(SPRN_EBBRR);
1119 t->fscr = mfspr(SPRN_FSCR);
1122 * Note that the TAR is not available for use in the kernel.
1123 * (To provide this, the TAR should be backed up/restored on
1124 * exception entry/exit instead, and be in pt_regs. FIXME,
1125 * this should be in pt_regs anyway (for debug).)
1127 t->tar = mfspr(SPRN_TAR);
1131 thread_pkey_regs_save(t);
1134 static inline void restore_sprs(struct thread_struct *old_thread,
1135 struct thread_struct *new_thread)
1137 #ifdef CONFIG_ALTIVEC
1138 if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
1139 old_thread->vrsave != new_thread->vrsave)
1140 mtspr(SPRN_VRSAVE, new_thread->vrsave);
1142 #ifdef CONFIG_PPC_BOOK3S_64
1143 if (cpu_has_feature(CPU_FTR_DSCR)) {
1144 u64 dscr = get_paca()->dscr_default;
1145 if (new_thread->dscr_inherit)
1146 dscr = new_thread->dscr;
1148 if (old_thread->dscr != dscr)
1149 mtspr(SPRN_DSCR, dscr);
1152 if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
1153 if (old_thread->bescr != new_thread->bescr)
1154 mtspr(SPRN_BESCR, new_thread->bescr);
1155 if (old_thread->ebbhr != new_thread->ebbhr)
1156 mtspr(SPRN_EBBHR, new_thread->ebbhr);
1157 if (old_thread->ebbrr != new_thread->ebbrr)
1158 mtspr(SPRN_EBBRR, new_thread->ebbrr);
1160 if (old_thread->fscr != new_thread->fscr)
1161 mtspr(SPRN_FSCR, new_thread->fscr);
1163 if (old_thread->tar != new_thread->tar)
1164 mtspr(SPRN_TAR, new_thread->tar);
1167 if (cpu_has_feature(CPU_FTR_P9_TIDR) &&
1168 old_thread->tidr != new_thread->tidr)
1169 mtspr(SPRN_TIDR, new_thread->tidr);
1172 thread_pkey_regs_restore(new_thread, old_thread);
1175 struct task_struct *__switch_to(struct task_struct *prev,
1176 struct task_struct *new)
1178 struct thread_struct *new_thread, *old_thread;
1179 struct task_struct *last;
1180 #ifdef CONFIG_PPC_BOOK3S_64
1181 struct ppc64_tlb_batch *batch;
1184 new_thread = &new->thread;
1185 old_thread = &current->thread;
1187 WARN_ON(!irqs_disabled());
1189 #ifdef CONFIG_PPC_BOOK3S_64
1190 batch = this_cpu_ptr(&ppc64_tlb_batch);
1191 if (batch->active) {
1192 current_thread_info()->local_flags |= _TLF_LAZY_MMU;
1194 __flush_tlb_pending(batch);
1197 #endif /* CONFIG_PPC_BOOK3S_64 */
1199 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1200 switch_booke_debug_regs(&new->thread.debug);
1203 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
1206 #ifndef CONFIG_HAVE_HW_BREAKPOINT
1207 switch_hw_breakpoint(new);
1208 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1212 * We need to save SPRs before treclaim/trecheckpoint as these will
1213 * change a number of them.
1215 save_sprs(&prev->thread);
1217 /* Save FPU, Altivec, VSX and SPE state */
1220 __switch_to_tm(prev, new);
1222 if (!radix_enabled()) {
1224 * We can't take a PMU exception inside _switch() since there
1225 * is a window where the kernel stack SLB and the kernel stack
1226 * are out of sync. Hard disable here.
1232 * Call restore_sprs() before calling _switch(). If we move it after
1233 * _switch() then we miss out on calling it for new tasks. The reason
1234 * for this is we manually create a stack frame for new tasks that
1235 * directly returns through ret_from_fork() or
1236 * ret_from_kernel_thread(). See copy_thread() for details.
1238 restore_sprs(old_thread, new_thread);
1240 last = _switch(old_thread, new_thread);
1242 #ifdef CONFIG_PPC_BOOK3S_64
1243 if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
1244 current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
1245 batch = this_cpu_ptr(&ppc64_tlb_batch);
1249 if (current->thread.regs) {
1250 restore_math(current->thread.regs);
1253 * The copy-paste buffer can only store into foreign real
1254 * addresses, so unprivileged processes can not see the
1255 * data or use it in any way unless they have foreign real
1256 * mappings. If the new process has the foreign real address
1257 * mappings, we must issue a cp_abort to clear any state and
1258 * prevent snooping, corruption or a covert channel.
1261 atomic_read(&current->mm->context.vas_windows))
1262 asm volatile(PPC_CP_ABORT);
1264 #endif /* CONFIG_PPC_BOOK3S_64 */
1269 #define NR_INSN_TO_PRINT 16
1271 static void show_instructions(struct pt_regs *regs)
1274 unsigned long nip = regs->nip;
1275 unsigned long pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));
1277 printk("Instruction dump:");
1280 * If we were executing with the MMU off for instructions, adjust pc
1281 * rather than printing XXXXXXXX.
1283 if (!IS_ENABLED(CONFIG_BOOKE) && !(regs->msr & MSR_IR)) {
1284 pc = (unsigned long)phys_to_virt(pc);
1285 nip = (unsigned long)phys_to_virt(regs->nip);
1288 for (i = 0; i < NR_INSN_TO_PRINT; i++) {
1294 if (!__kernel_text_address(pc) ||
1295 get_kernel_nofault(instr, (const void *)pc)) {
1296 pr_cont("XXXXXXXX ");
1299 pr_cont("<%08x> ", instr);
1301 pr_cont("%08x ", instr);
1310 void show_user_instructions(struct pt_regs *regs)
1313 int n = NR_INSN_TO_PRINT;
1315 char buf[96]; /* enough for 8 times 9 + 2 chars */
1317 pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));
1319 seq_buf_init(&s, buf, sizeof(buf));
1326 for (i = 0; i < 8 && n; i++, n--, pc += sizeof(int)) {
1329 if (copy_from_user_nofault(&instr, (void __user *)pc,
1331 seq_buf_printf(&s, "XXXXXXXX ");
1334 seq_buf_printf(&s, regs->nip == pc ? "<%08x> " : "%08x ", instr);
1337 if (!seq_buf_has_overflowed(&s))
1338 pr_info("%s[%d]: code: %s\n", current->comm,
1339 current->pid, s.buffer);
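/*
 * Illustrative sketch only (not part of the original file): the seq_buf
 * pattern used by show_user_instructions() above.  A fixed buffer is
 * wrapped, formatted into, and only printed when nothing overflowed.
 */
static __maybe_unused void example_seq_buf_line(void)
{
	struct seq_buf s;
	char buf[64];
	int i;

	seq_buf_init(&s, buf, sizeof(buf));
	for (i = 0; i < 4; i++)
		seq_buf_printf(&s, "%08x ", i);
	if (!seq_buf_has_overflowed(&s))
		pr_info("codes: %s\n", s.buffer);
}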
1348 static struct regbit msr_bits[] = {
1349 #if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
1371 #ifndef CONFIG_BOOKE
1378 static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
1382 for (; bits->bit; ++bits)
1383 if (val & bits->bit) {
1384 pr_cont("%s%s", s, bits->name);
1389 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1390 static struct regbit msr_tm_bits[] = {
1397 static void print_tm_bits(unsigned long val)
1400 * This only prints something if at least one of the TM bits is set.
1401 * Inside the TM[], the output means:
1402 * E: Enabled (bit 32)
1403 * S: Suspended (bit 33)
1404 * T: Transactional (bit 34)
1406 if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
1408 print_bits(val, msr_tm_bits, "");
1413 static void print_tm_bits(unsigned long val) {}
1416 static void print_msr_bits(unsigned long val)
1419 print_bits(val, msr_bits, ",");
1425 #define REG "%016lx"
1426 #define REGS_PER_LINE 4
1427 #define LAST_VOLATILE 13
1430 #define REGS_PER_LINE 8
1431 #define LAST_VOLATILE 12
1434 void show_regs(struct pt_regs * regs)
1438 show_regs_print_info(KERN_DEFAULT);
1440 printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
1441 regs->nip, regs->link, regs->ctr);
1442 printk("REGS: %px TRAP: %04lx %s (%s)\n",
1443 regs, regs->trap, print_tainted(), init_utsname()->release);
1444 printk("MSR: "REG" ", regs->msr);
1445 print_msr_bits(regs->msr);
1446 pr_cont(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
1448 if (!trap_is_syscall(regs) && cpu_has_feature(CPU_FTR_CFAR))
1449 pr_cont("CFAR: "REG" ", regs->orig_gpr3);
1450 if (trap == 0x200 || trap == 0x300 || trap == 0x600)
1451 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
1452 pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
1454 pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
1457 pr_cont("IRQMASK: %lx ", regs->softe);
1459 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1460 if (MSR_TM_ACTIVE(regs->msr))
1461 pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
1464 for (i = 0; i < 32; i++) {
1465 if ((i % REGS_PER_LINE) == 0)
1466 pr_cont("\nGPR%02d: ", i);
1467 pr_cont(REG " ", regs->gpr[i]);
1468 if (i == LAST_VOLATILE && !FULL_REGS(regs))
1472 #ifdef CONFIG_KALLSYMS
1474 * Lookup NIP late so we have the best chance of getting the
1475 * above info out without failing
1477 printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
1478 printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
1480 show_stack(current, (unsigned long *) regs->gpr[1], KERN_DEFAULT);
1481 if (!user_mode(regs))
1482 show_instructions(regs);
1485 void flush_thread(void)
1487 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1488 flush_ptrace_hw_breakpoint(current);
1489 #else /* CONFIG_HAVE_HW_BREAKPOINT */
1490 set_debug_reg_defaults(&current->thread);
1491 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1494 #ifdef CONFIG_PPC_BOOK3S_64
1495 void arch_setup_new_exec(void)
1497 if (radix_enabled())
1499 hash__setup_new_exec();
1505 * Assign a TIDR (thread ID) for task @t and set it in the thread
1506 * structure. For now, we only support setting TIDR for 'current' task.
1508 * Since the TID value is a truncated form of the task's PID, it is possible
1509 * (but unlikely) for 2 threads to have the same TID. In the unlikely event
1510 * that 2 threads share the same TID and are waiting, one of the following
1511 * cases will happen:
1513 * 1. The correct thread is running, the wrong thread is not
1514 * In this situation, the correct thread is woken and proceeds to pass its
1517 * 2. Neither threads are running
1518 * In this situation, neither thread will be woken. When scheduled, the waiting
1519 * threads will execute either a wait, which will return immediately, followed
1520 * by a condition check, which will pass for the correct thread and fail
1521 * for the wrong thread, or they will execute the condition check immediately.
1523 * 3. The wrong thread is running, the correct thread is not
1524 * The wrong thread will be woken, but will fail its condition check and
1525 * re-execute wait. The correct thread, when scheduled, will execute either
1526 * its condition check (which will pass), or wait, which returns immediately
1527 * when called the first time after the thread is scheduled, followed by its
1528 * condition check (which will pass).
1530 * 4. Both threads are running
1531 * Both threads will be woken. The wrong thread will fail its condition check
1532 * and execute another wait, while the correct thread will pass its condition
1535 * @t: the task to set the thread ID for
1537 int set_thread_tidr(struct task_struct *t)
1539 if (!cpu_has_feature(CPU_FTR_P9_TIDR))
1548 t->thread.tidr = (u16)task_pid_nr(t);
1549 mtspr(SPRN_TIDR, t->thread.tidr);
1553 EXPORT_SYMBOL_GPL(set_thread_tidr);
1555 #endif /* CONFIG_PPC64 */
1558 release_thread(struct task_struct *t)
1563 * this gets called so that we can store coprocessor state into memory and
1564 * copy the current task into the new thread.
1566 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
1568 flush_all_to_thread(src);
1570 * Flush TM state out so we can copy it. __switch_to_tm() does this
1571 * flush but it removes the checkpointed state from the current CPU and
1572 * transitions the CPU out of TM mode. Hence we need to call
1573 * tm_recheckpoint_new_task() (on the same task) to restore the
1574 * checkpointed state back and the TM mode.
1576 * Can't pass dst because it isn't ready. Doesn't matter, passing
1577 * dst is only important for __switch_to()
1579 __switch_to_tm(src, src);
1583 clear_task_ebb(dst);
1588 static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
1590 #ifdef CONFIG_PPC_BOOK3S_64
1591 unsigned long sp_vsid;
1592 unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
1594 if (radix_enabled())
1597 if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
1598 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
1599 << SLB_VSID_SHIFT_1T;
1601 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
1603 sp_vsid |= SLB_VSID_KERNEL | llp;
1604 p->thread.ksp_vsid = sp_vsid;
1613 * Copy architecture-specific thread state
1615 int copy_thread(unsigned long clone_flags, unsigned long usp,
1616 unsigned long kthread_arg, struct task_struct *p,
1619 struct pt_regs *childregs, *kregs;
1620 extern void ret_from_fork(void);
1621 extern void ret_from_fork_scv(void);
1622 extern void ret_from_kernel_thread(void);
1624 unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
1625 struct thread_info *ti = task_thread_info(p);
1626 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1630 klp_init_thread_info(p);
1632 /* Copy registers */
1633 sp -= sizeof(struct pt_regs);
1634 childregs = (struct pt_regs *) sp;
1635 if (unlikely(p->flags & PF_KTHREAD)) {
1637 memset(childregs, 0, sizeof(struct pt_regs));
1638 childregs->gpr[1] = sp + sizeof(struct pt_regs);
1641 childregs->gpr[14] = ppc_function_entry((void *)usp);
1643 clear_tsk_thread_flag(p, TIF_32BIT);
1644 childregs->softe = IRQS_ENABLED;
1646 childregs->gpr[15] = kthread_arg;
1647 p->thread.regs = NULL; /* no user register state */
1648 ti->flags |= _TIF_RESTOREALL;
1649 f = ret_from_kernel_thread;
1652 struct pt_regs *regs = current_pt_regs();
1653 CHECK_FULL_REGS(regs);
1656 childregs->gpr[1] = usp;
1657 p->thread.regs = childregs;
1658 /* 64s sets this in ret_from_fork */
1659 if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64))
1660 childregs->gpr[3] = 0; /* Result from fork() */
1661 if (clone_flags & CLONE_SETTLS) {
1662 if (!is_32bit_task())
1663 childregs->gpr[13] = tls;
1665 childregs->gpr[2] = tls;
1668 if (trap_is_scv(regs))
1669 f = ret_from_fork_scv;
1673 childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
1674 sp -= STACK_FRAME_OVERHEAD;
1677 * The way this works is that at some point in the future
1678 * some task will call _switch to switch to the new task.
1679 * That will pop off the stack frame created below and start
1680 * the new task running at ret_from_fork. The new task will
1681 * do some house keeping and then return from the fork or clone
1682 * system call, using the stack frame created above.
1684 ((unsigned long *)sp)[0] = 0;
1685 sp -= sizeof(struct pt_regs);
1686 kregs = (struct pt_regs *) sp;
1687 sp -= STACK_FRAME_OVERHEAD;
1690 p->thread.ksp_limit = (unsigned long)end_of_stack(p);
1692 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1693 for (i = 0; i < nr_wp_slots(); i++)
1694 p->thread.ptrace_bps[i] = NULL;
1697 p->thread.fp_save_area = NULL;
1698 #ifdef CONFIG_ALTIVEC
1699 p->thread.vr_save_area = NULL;
1702 setup_ksp_vsid(p, sp);
1705 if (cpu_has_feature(CPU_FTR_DSCR)) {
1706 p->thread.dscr_inherit = current->thread.dscr_inherit;
1707 p->thread.dscr = mfspr(SPRN_DSCR);
1709 if (cpu_has_feature(CPU_FTR_HAS_PPR))
1710 childregs->ppr = DEFAULT_PPR;
1714 kregs->nip = ppc_function_entry(f);
1718 void preload_new_slb_context(unsigned long start, unsigned long sp);
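/*
 * Illustrative sketch only (not part of the original file, assumes
 * <linux/kthread.h> and <linux/err.h>): kernel threads created through
 * the generic kthread API are set up by the PF_KTHREAD branch of
 * copy_thread() above and enter through ret_from_kernel_thread.
 */
static int example_kthread_fn(void *data)
{
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}

static __maybe_unused void example_spawn_kthread(void)
{
	struct task_struct *t = kthread_run(example_kthread_fn, NULL, "example");

	if (!IS_ERR(t))
		kthread_stop(t);
}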
1721 * Set up a thread for executing a new program
1723 void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
1726 unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
1728 #ifdef CONFIG_PPC_BOOK3S_64
1729 if (!radix_enabled())
1730 preload_new_slb_context(start, sp);
1735 * If we exec out of a kernel thread then thread.regs will not be
1738 if (!current->thread.regs) {
1739 struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
1740 current->thread.regs = regs - 1;
1743 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1745 * Clear any transactional state, we're exec()ing. The cause is
1746 * not important as there will never be a recheckpoint so it's not
1749 if (MSR_TM_SUSPENDED(mfmsr()))
1750 tm_reclaim_current(0);
1753 memset(regs->gpr, 0, sizeof(regs->gpr));
1761 * We have just cleared all the nonvolatile GPRs, so make
1762 * FULL_REGS(regs) return true. This is necessary to allow
1763 * ptrace to examine the thread immediately after exec.
1765 SET_FULL_REGS(regs);
1770 regs->msr = MSR_USER;
1772 if (!is_32bit_task()) {
1773 unsigned long entry;
1775 if (is_elf2_task()) {
1776 /* Look ma, no function descriptors! */
1781 * The latest iteration of the ABI requires that when
1782 * calling a function (at its global entry point),
1783 * the caller must ensure r12 holds the entry point
1784 * address (so that the function can quickly
1785 * establish addressability).
1787 regs->gpr[12] = start;
1788 /* Make sure that's restored on entry to userspace. */
1789 set_thread_flag(TIF_RESTOREALL);
1793 /* start is a relocated pointer to the function
1794 * descriptor for the elf _start routine. The first
1795 * entry in the function descriptor is the entry
1796 * address of _start and the second entry is the TOC
1797 * value we need to use.
1799 __get_user(entry, (unsigned long __user *)start);
1800 __get_user(toc, (unsigned long __user *)start+1);
1802 /* Check whether the e_entry function descriptor entries
1803 * need to be relocated before we can use them.
1805 if (load_addr != 0) {
1812 regs->msr = MSR_USER64;
1816 regs->msr = MSR_USER32;
1820 current->thread.used_vsr = 0;
1822 current->thread.load_slb = 0;
1823 current->thread.load_fp = 0;
1824 memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
1825 current->thread.fp_save_area = NULL;
1826 #ifdef CONFIG_ALTIVEC
1827 memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
1828 current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
1829 current->thread.vr_save_area = NULL;
1830 current->thread.vrsave = 0;
1831 current->thread.used_vr = 0;
1832 current->thread.load_vec = 0;
1833 #endif /* CONFIG_ALTIVEC */
1835 memset(current->thread.evr, 0, sizeof(current->thread.evr));
1836 current->thread.acc = 0;
1837 current->thread.spefscr = 0;
1838 current->thread.used_spe = 0;
1839 #endif /* CONFIG_SPE */
1840 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1841 current->thread.tm_tfhar = 0;
1842 current->thread.tm_texasr = 0;
1843 current->thread.tm_tfiar = 0;
1844 current->thread.load_tm = 0;
1845 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1847 thread_pkey_regs_init(&current->thread);
1849 EXPORT_SYMBOL(start_thread);
1851 #define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
1852 | PR_FP_EXC_RES | PR_FP_EXC_INV)
1854 int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
1856 struct pt_regs *regs = tsk->thread.regs;
1858 /* This is a bit hairy. If we are an SPE enabled processor
1859 * (have embedded fp) we store the IEEE exception enable flags in
1860 * fpexc_mode. fpexc_mode is also used for setting FP exception
1861 * mode (async, precise, disabled) for 'Classic' FP. */
1862 if (val & PR_FP_EXC_SW_ENABLE) {
1864 if (cpu_has_feature(CPU_FTR_SPE)) {
1866 * When the sticky exception bits are set
1867 * directly by userspace, it must call prctl
1868 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
1869 * in the existing prctl settings) or
1870 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
1871 * the bits being set). <fenv.h> functions
1872 * saving and restoring the whole
1873 * floating-point environment need to do so
1874 * anyway to restore the prctl settings from
1875 * the saved environment.
1877 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
1878 tsk->thread.fpexc_mode = val &
1879 (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
1889 /* on a CONFIG_SPE this does not hurt us. The bits that
1890 * __pack_fe01 use do not overlap with bits used for
1891 * PR_FP_EXC_SW_ENABLE. Additionally, the MSR[FE0,FE1] bits
1892 * on CONFIG_SPE implementations are reserved so writing to
1893 * them does not change anything */
1894 if (val > PR_FP_EXC_PRECISE)
1896 tsk->thread.fpexc_mode = __pack_fe01(val);
1897 if (regs != NULL && (regs->msr & MSR_FP) != 0)
1898 regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
1899 | tsk->thread.fpexc_mode;
1903 int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
1907 if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
1909 if (cpu_has_feature(CPU_FTR_SPE)) {
1911 * When the sticky exception bits are set
1912 * directly by userspace, it must call prctl
1913 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
1914 * in the existing prctl settings) or
1915 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
1916 * the bits being set). <fenv.h> functions
1917 * saving and restoring the whole
1918 * floating-point environment need to do so
1919 * anyway to restore the prctl settings from
1920 * the saved environment.
1922 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
1923 val = tsk->thread.fpexc_mode;
1930 val = __unpack_fe01(tsk->thread.fpexc_mode);
1931 return put_user(val, (unsigned int __user *) adr);
1934 int set_endian(struct task_struct *tsk, unsigned int val)
1936 struct pt_regs *regs = tsk->thread.regs;
1938 if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
1939 (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
1945 if (val == PR_ENDIAN_BIG)
1946 regs->msr &= ~MSR_LE;
1947 else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
1948 regs->msr |= MSR_LE;
1955 int get_endian(struct task_struct *tsk, unsigned long adr)
1957 struct pt_regs *regs = tsk->thread.regs;
1960 if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
1961 !cpu_has_feature(CPU_FTR_REAL_LE))
1967 if (regs->msr & MSR_LE) {
1968 if (cpu_has_feature(CPU_FTR_REAL_LE))
1969 val = PR_ENDIAN_LITTLE;
1971 val = PR_ENDIAN_PPC_LITTLE;
1973 val = PR_ENDIAN_BIG;
1975 return put_user(val, (unsigned int __user *)adr);
1978 int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
1980 tsk->thread.align_ctl = val;
1984 int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
1986 return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
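/*
 * Illustrative userspace sketch (not part of the original file, hence
 * compiled out): set_fpexc_mode(), get_endian() and get_unalign_ctl()
 * above back the corresponding prctl() operations.
 */
#if 0
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	int endian;

	prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);		/* precise FP exceptions */
	prctl(PR_GET_ENDIAN, (unsigned long)&endian);
	printf("endian mode: %d\n", endian);
	return 0;
}
#endif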
1989 static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
1990 unsigned long nbytes)
1992 unsigned long stack_page;
1993 unsigned long cpu = task_cpu(p);
1995 stack_page = (unsigned long)hardirq_ctx[cpu];
1996 if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
1999 stack_page = (unsigned long)softirq_ctx[cpu];
2000 if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
2006 static inline int valid_emergency_stack(unsigned long sp, struct task_struct *p,
2007 unsigned long nbytes)
2010 unsigned long stack_page;
2011 unsigned long cpu = task_cpu(p);
2013 stack_page = (unsigned long)paca_ptrs[cpu]->emergency_sp - THREAD_SIZE;
2014 if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
2017 # ifdef CONFIG_PPC_BOOK3S_64
2018 stack_page = (unsigned long)paca_ptrs[cpu]->nmi_emergency_sp - THREAD_SIZE;
2019 if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
2022 stack_page = (unsigned long)paca_ptrs[cpu]->mc_emergency_sp - THREAD_SIZE;
2023 if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
2032 int validate_sp(unsigned long sp, struct task_struct *p,
2033 unsigned long nbytes)
2035 unsigned long stack_page = (unsigned long)task_stack_page(p);
2037 if (sp < THREAD_SIZE)
2040 if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
2043 if (valid_irq_stack(sp, p, nbytes))
2046 return valid_emergency_stack(sp, p, nbytes);
2049 EXPORT_SYMBOL(validate_sp);
2051 static unsigned long __get_wchan(struct task_struct *p)
2053 unsigned long ip, sp;
2056 if (!p || p == current || p->state == TASK_RUNNING)
2060 if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
2064 sp = *(unsigned long *)sp;
2065 if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
2066 p->state == TASK_RUNNING)
2069 ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
2070 if (!in_sched_functions(ip))
2073 } while (count++ < 16);
2077 unsigned long get_wchan(struct task_struct *p)
2081 if (!try_get_task_stack(p))
2084 ret = __get_wchan(p);
2091 static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
2093 void show_stack(struct task_struct *tsk, unsigned long *stack,
2096 unsigned long sp, ip, lr, newsp;
2099 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2100 unsigned long ret_addr;
2107 if (!try_get_task_stack(tsk))
2110 sp = (unsigned long) stack;
2113 sp = current_stack_frame();
2115 sp = tsk->thread.ksp;
2119 printk("%sCall Trace:\n", loglvl);
2121 if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
2124 stack = (unsigned long *) sp;
2126 ip = stack[STACK_FRAME_LR_SAVE];
2127 if (!firstframe || ip != lr) {
2128 printk("%s["REG"] ["REG"] %pS",
2129 loglvl, sp, ip, (void *)ip);
2130 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2131 ret_addr = ftrace_graph_ret_addr(current,
2132 &ftrace_idx, ip, stack);
2134 pr_cont(" (%pS)", (void *)ret_addr);
2137 pr_cont(" (unreliable)");
2143 * See if this is an exception frame.
2144 * We look for the "regshere" marker in the current frame.
2146 if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
2147 && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
2148 struct pt_regs *regs = (struct pt_regs *)
2149 (sp + STACK_FRAME_OVERHEAD);
2151 printk("%s--- interrupt: %lx at %pS\n LR = %pS\n",
2153 (void *)regs->nip, (void *)lr);
2158 } while (count++ < kstack_depth_to_print);
2160 put_task_stack(tsk);
2164 /* Called with hard IRQs off */
2165 void notrace __ppc64_runlatch_on(void)
2167 struct thread_info *ti = current_thread_info();
2169 if (cpu_has_feature(CPU_FTR_ARCH_206)) {
2171 * Least significant bit (RUN) is the only writable bit of
2172 * the CTRL register, so we can avoid mfspr. 2.06 is not the
2173 * earliest ISA where this is the case, but it's convenient.
2175 mtspr(SPRN_CTRLT, CTRL_RUNLATCH);
2180 * Some architectures (e.g., Cell) have writable fields other
2181 * than RUN, so do the read-modify-write.
2183 ctrl = mfspr(SPRN_CTRLF);
2184 ctrl |= CTRL_RUNLATCH;
2185 mtspr(SPRN_CTRLT, ctrl);
2188 ti->local_flags |= _TLF_RUNLATCH;
2191 /* Called with hard IRQs off */
2192 void notrace __ppc64_runlatch_off(void)
2194 struct thread_info *ti = current_thread_info();
2196 ti->local_flags &= ~_TLF_RUNLATCH;
2198 if (cpu_has_feature(CPU_FTR_ARCH_206)) {
2199 mtspr(SPRN_CTRLT, 0);
2203 ctrl = mfspr(SPRN_CTRLF);
2204 ctrl &= ~CTRL_RUNLATCH;
2205 mtspr(SPRN_CTRLT, ctrl);
2208 #endif /* CONFIG_PPC64 */
2210 unsigned long arch_align_stack(unsigned long sp)
2212 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2213 sp -= get_random_int() & ~PAGE_MASK;
2217 static inline unsigned long brk_rnd(void)
2219 unsigned long rnd = 0;
2221 /* 8MB for 32bit, 1GB for 64bit */
2222 if (is_32bit_task())
2223 rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
2225 rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));
2227 return rnd << PAGE_SHIFT;
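/*
 * Worked example (illustration only): with 4K pages (PAGE_SHIFT = 12) a
 * 64-bit task gets a random offset of up to 1 << (30 - 12) = 262144
 * pages, i.e. up to 1GB above mm->brk, while a 32-bit task gets up to
 * 1 << (23 - 12) = 2048 pages, i.e. up to 8MB, matching the comment
 * above.
 */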
2230 unsigned long arch_randomize_brk(struct mm_struct *mm)
2232 unsigned long base = mm->brk;
2235 #ifdef CONFIG_PPC_BOOK3S_64
2237 * If we are using 1TB segments and we are allowed to randomise
2238 * the heap, we can put it above 1TB so it is backed by a 1TB
2239 * segment. Otherwise the heap will be in the bottom 1TB
2240 * which always uses 256MB segments and this may result in a
2241 * performance penalty. We don't need to worry about radix. For
2242 * radix, mmu_highuser_ssize remains unchanged from 256MB.
2244 if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2245 base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2248 ret = PAGE_ALIGN(base + brk_rnd());