/*
 *  Derived from "arch/i386/kernel/process.c"
 *    Copyright (C) 1995  Linus Torvalds
 *
 *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 *  Paul Mackerras (paulus@cs.anu.edu.au)
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/uaccess.h>

#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/runlatch.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#include <asm/code-patching.h>
#include <asm/livepatch.h>

#include <linux/kprobes.h>
#include <linux/kdebug.h>
/* Transactional Memory debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while(0)
#endif

extern unsigned long _get_SP(void);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static void check_if_tm_restore_required(struct task_struct *tsk)
{
	/*
	 * If we are saving the current thread's registers, and the
	 * thread is in a transactional state, set the TIF_RESTORE_TM
	 * bit so that we know to restore the registers before
	 * returning to userspace.
	 */
	if (tsk == current && tsk->thread.regs &&
	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
	    !test_thread_flag(TIF_RESTORE_TM)) {
		tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
		set_thread_flag(TIF_RESTORE_TM);
	}
}
#else
static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
bool strict_msr_control;
EXPORT_SYMBOL(strict_msr_control);

static int __init enable_strict_msr_control(char *str)
{
	strict_msr_control = true;
	pr_info("Enabling strict facility control\n");

	return 0;
}
early_param("ppc_strict_facility_enable", enable_strict_msr_control);
void msr_check_and_set(unsigned long bits)
{
	unsigned long oldmsr = mfmsr();
	unsigned long newmsr;

	newmsr = oldmsr | bits;

#ifdef CONFIG_VSX
	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
		newmsr |= MSR_VSX;
#endif

	if (oldmsr != newmsr)
		mtmsr_isync(newmsr);
}

void __msr_check_and_clear(unsigned long bits)
{
	unsigned long oldmsr = mfmsr();
	unsigned long newmsr;

	newmsr = oldmsr & ~bits;

#ifdef CONFIG_VSX
	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
		newmsr &= ~MSR_VSX;
#endif

	if (oldmsr != newmsr)
		mtmsr_isync(newmsr);
}
EXPORT_SYMBOL(__msr_check_and_clear);
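
/*
 * Illustrative usage sketch (editor's note, not part of the original
 * source): callers bracket temporary facility use with a set/clear pair,
 * the way giveup_fpu() below does:
 *
 *	msr_check_and_set(MSR_FP);	// make FP usable in the kernel
 *	__giveup_fpu(tsk);		// save state, clear the user MSR bit
 *	msr_check_and_clear(MSR_FP);	// turn the facility back off
 */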
#ifdef CONFIG_PPC_FPU
void __giveup_fpu(struct task_struct *tsk)
{
	save_fpu(tsk);
	tsk->thread.regs->msr &= ~MSR_FP;
#ifdef CONFIG_VSX
	if (cpu_has_feature(CPU_FTR_VSX))
		tsk->thread.regs->msr &= ~MSR_VSX;
#endif
}

void giveup_fpu(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_FP);
	__giveup_fpu(tsk);
	msr_check_and_clear(MSR_FP);
}
EXPORT_SYMBOL(giveup_fpu);
/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
			giveup_fpu(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);
void enable_kernel_fp(void)
{
	WARN_ON(preemptible());

	msr_check_and_set(MSR_FP);

	if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
		check_if_tm_restore_required(current);
		__giveup_fpu(current);
	}
}
EXPORT_SYMBOL(enable_kernel_fp);
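
/*
 * Illustrative usage sketch (editor's note, not from the original source):
 * a caller that wants the FPU in kernel code must hold off preemption for
 * the duration, since the enabled state is per-CPU:
 *
 *	preempt_disable();
 *	enable_kernel_fp();
 *	// ... use floating-point registers ...
 *	preempt_enable();
 */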
static int restore_fp(struct task_struct *tsk)
{
	if (tsk->thread.load_fp) {
		load_fp_state(&current->thread.fp_state);
		current->thread.load_fp++;
		return 1;
	}
	return 0;
}
#else
static int restore_fp(struct task_struct *tsk) { return 0; }
#endif /* CONFIG_PPC_FPU */
#ifdef CONFIG_ALTIVEC
#define loadvec(thr) ((thr).load_vec)

static void __giveup_altivec(struct task_struct *tsk)
{
	save_altivec(tsk);
	tsk->thread.regs->msr &= ~MSR_VEC;
#ifdef CONFIG_VSX
	if (cpu_has_feature(CPU_FTR_VSX))
		tsk->thread.regs->msr &= ~MSR_VSX;
#endif
}

void giveup_altivec(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_VEC);
	__giveup_altivec(tsk);
	msr_check_and_clear(MSR_VEC);
}
EXPORT_SYMBOL(giveup_altivec);
void enable_kernel_altivec(void)
{
	WARN_ON(preemptible());

	msr_check_and_set(MSR_VEC);

	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
		check_if_tm_restore_required(current);
		__giveup_altivec(current);
	}
}
EXPORT_SYMBOL(enable_kernel_altivec);
/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
			BUG_ON(tsk != current);
			giveup_altivec(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
static int restore_altivec(struct task_struct *tsk)
{
	if (cpu_has_feature(CPU_FTR_ALTIVEC) && tsk->thread.load_vec) {
		load_vr_state(&tsk->thread.vr_state);
		tsk->thread.used_vr = 1;
		tsk->thread.load_vec++;

		return 1;
	}
	return 0;
}
#else
#define loadvec(thr) 0
static inline int restore_altivec(struct task_struct *tsk) { return 0; }
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
static void __giveup_vsx(struct task_struct *tsk)
{
	if (tsk->thread.regs->msr & MSR_FP)
		__giveup_fpu(tsk);
	if (tsk->thread.regs->msr & MSR_VEC)
		__giveup_altivec(tsk);
	tsk->thread.regs->msr &= ~MSR_VSX;
}

static void giveup_vsx(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
	__giveup_vsx(tsk);
	msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
}

static void save_vsx(struct task_struct *tsk)
{
	if (tsk->thread.regs->msr & MSR_FP)
		save_fpu(tsk);
	if (tsk->thread.regs->msr & MSR_VEC)
		save_altivec(tsk);
}

void enable_kernel_vsx(void)
{
	WARN_ON(preemptible());

	msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);

	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
		check_if_tm_restore_required(current);
		if (current->thread.regs->msr & MSR_FP)
			__giveup_fpu(current);
		if (current->thread.regs->msr & MSR_VEC)
			__giveup_altivec(current);
		__giveup_vsx(current);
	}
}
EXPORT_SYMBOL(enable_kernel_vsx);

void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VSX) {
			BUG_ON(tsk != current);
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);

static int restore_vsx(struct task_struct *tsk)
{
	if (cpu_has_feature(CPU_FTR_VSX)) {
		tsk->thread.used_vsr = 1;
		return 1;
	}

	return 0;
}
#else
static inline int restore_vsx(struct task_struct *tsk) { return 0; }
static inline void save_vsx(struct task_struct *tsk) { }
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
void giveup_spe(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_SPE);
	__giveup_spe(tsk);
	msr_check_and_clear(MSR_SPE);
}
EXPORT_SYMBOL(giveup_spe);

void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

	msr_check_and_set(MSR_SPE);

	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
		check_if_tm_restore_required(current);
		__giveup_spe(current);
	}
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
			BUG_ON(tsk != current);
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_SPE */
static unsigned long msr_all_available;

static int __init init_msr_all_available(void)
{
#ifdef CONFIG_PPC_FPU
	msr_all_available |= MSR_FP;
#endif
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		msr_all_available |= MSR_VEC;
#endif
#ifdef CONFIG_VSX
	if (cpu_has_feature(CPU_FTR_VSX))
		msr_all_available |= MSR_VSX;
#endif
#ifdef CONFIG_SPE
	if (cpu_has_feature(CPU_FTR_SPE))
		msr_all_available |= MSR_SPE;
#endif

	return 0;
}
early_initcall(init_msr_all_available);
void giveup_all(struct task_struct *tsk)
{
	unsigned long usermsr;

	if (!tsk->thread.regs)
		return;

	usermsr = tsk->thread.regs->msr;

	if ((usermsr & msr_all_available) == 0)
		return;

	msr_check_and_set(msr_all_available);

#ifdef CONFIG_PPC_FPU
	if (usermsr & MSR_FP)
		__giveup_fpu(tsk);
#endif
#ifdef CONFIG_ALTIVEC
	if (usermsr & MSR_VEC)
		__giveup_altivec(tsk);
#endif
#ifdef CONFIG_VSX
	if (usermsr & MSR_VSX)
		__giveup_vsx(tsk);
#endif
#ifdef CONFIG_SPE
	if (usermsr & MSR_SPE)
		__giveup_spe(tsk);
#endif

	msr_check_and_clear(msr_all_available);
}
EXPORT_SYMBOL(giveup_all);
void restore_math(struct pt_regs *regs)
{
	unsigned long msr;

	if (!current->thread.load_fp && !loadvec(current->thread))
		return;

	msr = regs->msr;
	msr_check_and_set(msr_all_available);

	/*
	 * Only reload if the bit is not set in the user MSR; the bit
	 * being set indicates that the registers are hot.
	 */
	if ((!(msr & MSR_FP)) && restore_fp(current))
		msr |= MSR_FP | current->thread.fpexc_mode;

	if ((!(msr & MSR_VEC)) && restore_altivec(current))
		msr |= MSR_VEC;

	if ((msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC) &&
			restore_vsx(current)) {
		msr |= MSR_VSX;
	}

	msr_check_and_clear(msr_all_available);

	regs->msr = msr;
}
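
/*
 * Worked example (editor's note, not from the original source): if a task
 * heads back to userspace with MSR_FP clear but thread.load_fp set,
 * restore_fp() reloads fp_state and the code above ORs MSR_FP (plus the
 * fpexc mode bits) back into regs->msr, so the first FP instruction in
 * userspace does not trap with an FP-unavailable exception.
 */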
void save_all(struct task_struct *tsk)
{
	unsigned long usermsr;

	if (!tsk->thread.regs)
		return;

	usermsr = tsk->thread.regs->msr;

	if ((usermsr & msr_all_available) == 0)
		return;

	msr_check_and_set(msr_all_available);

	/*
	 * Saving mirrors the way the register space is laid out in
	 * hardware: save_vsx() boils down to a save_fpu() plus a
	 * save_altivec().
	 */
	if (usermsr & MSR_VSX) {
		save_vsx(tsk);
	} else {
		if (usermsr & MSR_FP)
			save_fpu(tsk);

		if (usermsr & MSR_VEC)
			save_altivec(tsk);
	}

	if (usermsr & MSR_SPE)
		__giveup_spe(tsk);

	msr_check_and_clear(msr_all_available);
}
void flush_all_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		BUG_ON(tsk != current);
		save_all(tsk);

#ifdef CONFIG_SPE
		if (tsk->thread.regs->msr & MSR_SPE)
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
#endif

		preempt_enable();
	}
}
EXPORT_SYMBOL(flush_all_to_thread);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code, int signal_code, int breakpt)
{
	siginfo_t info;

	current->thread.trap_nr = signal_code;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = breakpt;	/* breakpoint or watchpoint id */
	info.si_code = signal_code;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_break(struct pt_regs *regs, unsigned long address,
	      unsigned long error_code)
{
	siginfo_t info;

	current->thread.trap_nr = TRAP_HWBKPT;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_break_match(regs))
		return;

	/* Clear the breakpoint */
	hw_breakpoint_disable();

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->debug.iac1 = thread->debug.iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	thread->debug.iac3 = thread->debug.iac4 = 0;
#endif
	thread->debug.dac1 = thread->debug.dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	thread->debug.dvc1 = thread->debug.dvc2 = 0;
#endif
	thread->debug.dbcr0 = 0;
#ifdef CONFIG_BOOKE
	/*
	 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
	 */
	thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
			DBCR1_IAC3US | DBCR1_IAC4US;
	/*
	 * Force Data Address Compare User/Supervisor bits to be User-only
	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
	 */
	thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
	thread->debug.dbcr1 = 0;
#endif
}

static void prime_debug_regs(struct debug_reg *debug)
{
	/*
	 * We could have inherited MSR_DE from userspace, since
	 * it doesn't get cleared on exception entry.  Make sure
	 * MSR_DE is clear before we enable any debug events.
	 */
	mtmsr(mfmsr() & ~MSR_DE);

	mtspr(SPRN_IAC1, debug->iac1);
	mtspr(SPRN_IAC2, debug->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	mtspr(SPRN_IAC3, debug->iac3);
	mtspr(SPRN_IAC4, debug->iac4);
#endif
	mtspr(SPRN_DAC1, debug->dac1);
	mtspr(SPRN_DAC2, debug->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	mtspr(SPRN_DVC1, debug->dvc1);
	mtspr(SPRN_DVC2, debug->dvc2);
#endif
	mtspr(SPRN_DBCR0, debug->dbcr0);
	mtspr(SPRN_DBCR1, debug->dbcr1);
#ifdef CONFIG_BOOKE
	mtspr(SPRN_DBCR2, debug->dbcr2);
#endif
}
/*
 * Unless neither the old nor the new thread is making use of the
 * debug registers, set the debug registers from the values
 * stored in the new thread.
 */
void switch_booke_debug_regs(struct debug_reg *new_debug)
{
	if ((current->thread.debug.dbcr0 & DBCR0_IDM)
		|| (new_debug->dbcr0 & DBCR0_IDM))
			prime_debug_regs(new_debug);
}
EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->hw_brk.address = 0;
	thread->hw_brk.type = 0;
	set_breakpoint(&thread->hw_brk);
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
	isync();
#endif
	return 0;
}
#elif defined(CONFIG_PPC_BOOK3S)
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DABR, dabr);
	if (cpu_has_feature(CPU_FTR_DABRX))
		mtspr(SPRN_DABRX, dabrx);
	return 0;
}
#else
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	return -EINVAL;
}
#endif

static inline int set_dabr(struct arch_hw_breakpoint *brk)
{
	unsigned long dabr, dabrx;

	dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
	dabrx = ((brk->type >> 3) & 0x7);

	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr, dabrx);

	return __set_dabr(dabr, dabrx);
}
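
/*
 * Worked example (editor's note, not from the original source): for a
 * user-mode write watchpoint, brk->type is
 * HW_BRK_TYPE_WRITE | HW_BRK_TYPE_TRANSLATE | HW_BRK_TYPE_USER.  The low
 * DABR bits come straight from the HW_BRK_TYPE_DABR mask, and the
 * privilege bits land in DABRX via (type >> 3) & 0x7, which here yields 1
 * (user-only matching).
 */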
static inline int set_dawr(struct arch_hw_breakpoint *brk)
{
	unsigned long dawr, dawrx, mrd;

	dawr = brk->address;

	dawrx  = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE))
				<< (63 - 58); /* read/write bits */
	dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2)
				<< (63 - 59); /* translate */
	dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL))
				>> 3; /* PRIM bits */
	/*
	 * dawr length is stored in field MDR bits 48:53.  It matches the
	 * range in doublewords (64 bits) biased by -1, eg. 0b000000=1DW and
	 * 0b111111=64DW.
	 * brk->len is in bytes.
	 * This aligns up to double word size, shifts and does the bias.
	 */
	mrd = ((brk->len + 7) >> 3) - 1;
	dawrx |= (mrd & 0x3f) << (63 - 53);

	if (ppc_md.set_dawr)
		return ppc_md.set_dawr(dawr, dawrx);
	mtspr(SPRN_DAWR, dawr);
	mtspr(SPRN_DAWRX, dawrx);
	return 0;
}
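
/*
 * Worked example (editor's note, not from the original source): an 8-byte
 * watchpoint gives mrd = ((8 + 7) >> 3) - 1 = 0, i.e. "1 doubleword",
 * while a 16-byte one gives mrd = ((16 + 7) >> 3) - 1 = 1, i.e.
 * "2 doublewords".
 */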
void __set_breakpoint(struct arch_hw_breakpoint *brk)
{
	memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));

	if (cpu_has_feature(CPU_FTR_DAWR))
		set_dawr(brk);
	else
		set_dabr(brk);
}

void set_breakpoint(struct arch_hw_breakpoint *brk)
{
	preempt_disable();
	__set_breakpoint(brk);
	preempt_enable();
}

#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif
static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
			      struct arch_hw_breakpoint *b)
{
	if (a->address != b->address)
		return false;
	if (a->type != b->type)
		return false;
	if (a->len != b->len)
		return false;
	return true;
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static void tm_reclaim_thread(struct thread_struct *thr,
			      struct thread_info *ti, uint8_t cause)
{
	unsigned long msr_diff = 0;

	/*
	 * If FP/VSX registers have been already saved to the
	 * thread_struct, move them to the transact_fp array.
	 * We clear the TIF_RESTORE_TM bit since after the reclaim
	 * the thread will no longer be transactional.
	 */
	if (test_ti_thread_flag(ti, TIF_RESTORE_TM)) {
		msr_diff = thr->ckpt_regs.msr & ~thr->regs->msr;
		if (msr_diff & MSR_FP)
			memcpy(&thr->transact_fp, &thr->fp_state,
			       sizeof(struct thread_fp_state));
		if (msr_diff & MSR_VEC)
			memcpy(&thr->transact_vr, &thr->vr_state,
			       sizeof(struct thread_vr_state));
		clear_ti_thread_flag(ti, TIF_RESTORE_TM);
		msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1;
	}

	/*
	 * Use the current MSR TM suspended bit to track if we have
	 * checkpointed state outstanding.
	 * On signal delivery, we'd normally reclaim the checkpointed
	 * state to obtain stack pointer (see:get_tm_stackpointer()).
	 * This will then directly return to userspace without going
	 * through __switch_to(). However, if the stack frame is bad,
	 * we need to exit this thread which calls __switch_to() which
	 * will again attempt to reclaim the already saved tm state.
	 * Hence we need to check that we've not already reclaimed
	 * this state.
	 * We do this using the current MSR, rather than tracking it in
	 * some specific thread_struct bit, as it has the additional
	 * benefit of checking for a potential TM bad thing exception.
	 */
	if (!MSR_TM_SUSPENDED(mfmsr()))
		return;

	tm_reclaim(thr, thr->regs->msr, cause);

	/* Having done the reclaim, we now have the checkpointed
	 * FP/VSX values in the registers.  These might be valid
	 * even if we have previously called enable_kernel_fp() or
	 * flush_fp_to_thread(), so update thr->regs->msr to
	 * indicate their current validity.
	 */
	thr->regs->msr |= msr_diff;
}

void tm_reclaim_current(uint8_t cause)
{
	tm_enable();
	tm_reclaim_thread(&current->thread, current_thread_info(), cause);
}
static inline void tm_reclaim_task(struct task_struct *tsk)
{
	/* We have to work out if we're switching from/to a task that's in the
	 * middle of a transaction.
	 *
	 * In switching we need to maintain a 2nd register state as
	 * oldtask->thread.ckpt_regs.  We tm_reclaim(oldproc); this saves the
	 * checkpointed (tbegin) state in ckpt_regs and saves the transactional
	 * (current) FPRs into oldtask->thread.transact_fpr[].
	 *
	 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
	 */
	struct thread_struct *thr = &tsk->thread;

	if (!thr->regs)
		return;

	if (!MSR_TM_ACTIVE(thr->regs->msr))
		goto out_and_saveregs;

	/* Stash the original thread MSR, as giveup_fpu et al will
	 * modify it.  We hold onto it to see whether the task used
	 * FP & vector regs.  If the TIF_RESTORE_TM flag is set,
	 * ckpt_regs.msr is already set.
	 */
	if (!test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_TM))
		thr->ckpt_regs.msr = thr->regs->msr;

	TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
		 "ccr=%lx, msr=%lx, trap=%lx)\n",
		 tsk->pid, thr->regs->nip,
		 thr->regs->ccr, thr->regs->msr,
		 thr->regs->trap);

	tm_reclaim_thread(thr, task_thread_info(tsk), TM_CAUSE_RESCHED);

	TM_DEBUG("--- tm_reclaim on pid %d complete\n",
		 tsk->pid);

out_and_saveregs:
	/* Always save the regs here, even if a transaction's not active.
	 * This context-switches a thread's TM info SPRs.  We do it here to
	 * be consistent with the restore path (in recheckpoint) which
	 * cannot happen later in _switch().
	 */
	tm_save_sprs(thr);
}
extern void __tm_recheckpoint(struct thread_struct *thread,
			      unsigned long orig_msr);

void tm_recheckpoint(struct thread_struct *thread,
		     unsigned long orig_msr)
{
	unsigned long flags;

	/* We really can't be interrupted here as the TEXASR registers can't
	 * change and later in the trecheckpoint code, we have a userspace R1.
	 * So let's hard disable over this region.
	 */
	local_irq_save(flags);
	hard_irq_disable();

	/* The TM SPRs are restored here, so that TEXASR.FS can be set
	 * before the trecheckpoint and no explosion occurs.
	 */
	tm_restore_sprs(thread);

	__tm_recheckpoint(thread, orig_msr);

	local_irq_restore(flags);
}
static inline void tm_recheckpoint_new_task(struct task_struct *new)
{
	unsigned long msr;

	if (!cpu_has_feature(CPU_FTR_TM))
		return;

	/* Recheckpoint the registers of the thread we're about to switch to.
	 *
	 * If the task was using FP, we non-lazily reload both the original and
	 * the speculative FP register states.  This is because the kernel
	 * doesn't see if/when a TM rollback occurs, so if we take an FP
	 * unavailable later, we are unable to determine which set of FP regs
	 * need to be restored.
	 */
	if (!new->thread.regs)
		return;

	if (!MSR_TM_ACTIVE(new->thread.regs->msr)) {
		tm_restore_sprs(&new->thread);
		return;
	}
	msr = new->thread.ckpt_regs.msr;
	/* Recheckpoint to restore original checkpointed register state. */
	TM_DEBUG("*** tm_recheckpoint of pid %d "
		 "(new->msr 0x%lx, new->origmsr 0x%lx)\n",
		 new->pid, new->thread.regs->msr, msr);

	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&new->thread, msr);

	/* This loads the speculative FP/VEC state, if used */
	if (msr & MSR_FP) {
		do_load_up_transact_fpu(&new->thread);
		new->thread.regs->msr |=
			(MSR_FP | new->thread.fpexc_mode);
	}
#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		do_load_up_transact_altivec(&new->thread);
		new->thread.regs->msr |= MSR_VEC;
	}
#endif
	/* We may as well turn on VSX too since all the state is restored now */
	if (msr & MSR_VSX)
		new->thread.regs->msr |= MSR_VSX;

	TM_DEBUG("*** tm_recheckpoint of pid %d complete "
		 "(kernel msr 0x%lx)\n",
		 new->pid, mfmsr());
}
static inline void __switch_to_tm(struct task_struct *prev)
{
	if (cpu_has_feature(CPU_FTR_TM)) {
		tm_enable();
		tm_reclaim_task(prev);
	}
}

/*
 * This is called if we are on the way out to userspace and the
 * TIF_RESTORE_TM flag is set.  It checks if we need to reload
 * FP and/or vector state and does so if necessary.
 * If userspace is inside a transaction (whether active or
 * suspended) and FP/VMX/VSX instructions have ever been enabled
 * inside that transaction, then we have to keep them enabled
 * and keep the FP/VMX/VSX state loaded while ever the transaction
 * continues.  The reason is that if we didn't, and subsequently
 * got a FP/VMX/VSX unavailable interrupt inside a transaction,
 * we don't know whether it's the same transaction, and thus we
 * don't know which of the checkpointed state and the transactional
 * state to use.
 */
void restore_tm_state(struct pt_regs *regs)
{
	unsigned long msr_diff;

	clear_thread_flag(TIF_RESTORE_TM);
	if (!MSR_TM_ACTIVE(regs->msr))
		return;

	msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
	msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;

	restore_math(regs);

	regs->msr |= msr_diff;
}

#else
#define tm_recheckpoint_new_task(new)
#define __switch_to_tm(prev)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
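
/*
 * Worked example (editor's note, not from the original source): if the
 * checkpointed MSR had FP and VEC set but the live MSR only has VEC,
 * msr_diff in restore_tm_state() comes out as MSR_FP, so only the FP
 * state is reloaded and re-enabled before the return to userspace.
 */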
static inline void save_sprs(struct thread_struct *t)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		t->vrsave = mfspr(SPRN_VRSAVE);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_DSCR))
		t->dscr = mfspr(SPRN_DSCR);

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		t->bescr = mfspr(SPRN_BESCR);
		t->ebbhr = mfspr(SPRN_EBBHR);
		t->ebbrr = mfspr(SPRN_EBBRR);

		t->fscr = mfspr(SPRN_FSCR);

		/*
		 * Note that the TAR is not available for use in the kernel.
		 * (To provide this, the TAR should be backed up/restored on
		 * exception entry/exit instead, and be in pt_regs.  FIXME,
		 * this should be in pt_regs anyway (for debug).)
		 */
		t->tar = mfspr(SPRN_TAR);
	}
#endif
}
static inline void restore_sprs(struct thread_struct *old_thread,
				struct thread_struct *new_thread)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
	    old_thread->vrsave != new_thread->vrsave)
		mtspr(SPRN_VRSAVE, new_thread->vrsave);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		u64 dscr = get_paca()->dscr_default;
		u64 fscr = old_thread->fscr & ~FSCR_DSCR;

		if (new_thread->dscr_inherit) {
			dscr = new_thread->dscr;
			fscr |= FSCR_DSCR;
		}

		if (old_thread->dscr != dscr)
			mtspr(SPRN_DSCR, dscr);

		if (old_thread->fscr != fscr)
			mtspr(SPRN_FSCR, fscr);
	}

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		if (old_thread->bescr != new_thread->bescr)
			mtspr(SPRN_BESCR, new_thread->bescr);
		if (old_thread->ebbhr != new_thread->ebbhr)
			mtspr(SPRN_EBBHR, new_thread->ebbhr);
		if (old_thread->ebbrr != new_thread->ebbrr)
			mtspr(SPRN_EBBRR, new_thread->ebbrr);

		if (old_thread->tar != new_thread->tar)
			mtspr(SPRN_TAR, new_thread->tar);
	}
#endif
}
struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	struct task_struct *last;
#ifdef CONFIG_PPC_BOOK3S_64
	struct ppc64_tlb_batch *batch;
#endif

	new_thread = &new->thread;
	old_thread = &current->thread;

	WARN_ON(!irqs_disabled());

#ifdef CONFIG_PPC64
	/*
	 * Collect processor utilization data per process
	 */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
		long unsigned start_tb, current_tb;
		start_tb = old_thread->start_tb;
		cu->current_tb = current_tb = mfspr(SPRN_PURR);
		old_thread->accum_tb += (current_tb - start_tb);
		new_thread->start_tb = current_tb;
	}
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_BOOK3S_64
	batch = this_cpu_ptr(&ppc64_tlb_batch);
	if (batch->active) {
		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
		if (batch->index)
			__flush_tlb_pending(batch);
		batch->active = 0;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	switch_booke_debug_regs(&new->thread.debug);
#else
/*
 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces.
 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
	if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
		__set_breakpoint(&new->thread.hw_brk);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif

	/*
	 * We need to save SPRs before treclaim/trecheckpoint as these will
	 * change a number of them.
	 */
	save_sprs(&prev->thread);

	__switch_to_tm(prev);

	/* Save FPU, Altivec, VSX and SPE state */
	giveup_all(prev);

	/*
	 * We can't take a PMU exception inside _switch() since there is a
	 * window where the kernel stack SLB and the kernel stack are out
	 * of sync. Hard disable here.
	 */
	hard_irq_disable();

	tm_recheckpoint_new_task(new);

	/*
	 * Call restore_sprs() before calling _switch(). If we move it after
	 * _switch() then we miss out on calling it for new tasks. The reason
	 * for this is we manually create a stack frame for new tasks that
	 * directly returns through ret_from_fork() or
	 * ret_from_kernel_thread(). See copy_thread() for details.
	 */
	restore_sprs(old_thread, new_thread);

	last = _switch(old_thread, new_thread);

#ifdef CONFIG_PPC_BOOK3S_64
	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
		batch = this_cpu_ptr(&ppc64_tlb_batch);
		batch->active = 1;
	}

	if (current_thread_info()->task->thread.regs)
		restore_math(current_thread_info()->task->thread.regs);
#endif /* CONFIG_PPC_BOOK3S_64 */

	return last;
}
static int instructions_to_print = 16;

static void show_instructions(struct pt_regs *regs)
{
	int i;
	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
			sizeof(int));

	printk("Instruction dump:");

	for (i = 0; i < instructions_to_print; i++) {
		int instr;

		if (!(i % 8))
			printk("\n");

#if !defined(CONFIG_BOOKE)
		/* If executing with the IMMU off, adjust pc rather
		 * than print XXXXXXXX.
		 */
		if (!(regs->msr & MSR_IR))
			pc = (unsigned long)phys_to_virt(pc);
#endif

		if (!__kernel_text_address(pc) ||
		     probe_kernel_address((unsigned int __user *)pc, instr)) {
			printk(KERN_CONT "XXXXXXXX ");
		} else {
			if (regs->nip == pc)
				printk(KERN_CONT "<%08x> ", instr);
			else
				printk(KERN_CONT "%08x ", instr);
		}

		pc += sizeof(int);
	}

	printk("\n");
}
struct regbit {
	unsigned long bit;
	const char *name;
};

static struct regbit msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
	{MSR_SF,	"SF"},
	{MSR_HV,	"HV"},
#endif
	{MSR_VEC,	"VEC"},
	{MSR_VSX,	"VSX"},
#ifdef CONFIG_BOOKE
	{MSR_CE,	"CE"},
#endif
	{MSR_EE,	"EE"},
	{MSR_PR,	"PR"},
	{MSR_FP,	"FP"},
	{MSR_ME,	"ME"},
#ifdef CONFIG_BOOKE
	{MSR_DE,	"DE"},
#else
	{MSR_SE,	"SE"},
	{MSR_BE,	"BE"},
#endif
	{MSR_IR,	"IR"},
	{MSR_DR,	"DR"},
	{MSR_PMM,	"PMM"},
#ifndef CONFIG_BOOKE
	{MSR_RI,	"RI"},
	{MSR_LE,	"LE"},
#endif
	{0,		NULL}
};

static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
{
	const char *s = "";

	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			printk("%s%s", s, bits->name);
			s = sep;
		}
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static struct regbit msr_tm_bits[] = {
	{MSR_TS_T,	"T"},
	{MSR_TS_S,	"S"},
	{MSR_TM,	"E"},
	{0,		NULL}
};

static void print_tm_bits(unsigned long val)
{
	/*
	 * This only prints something if at least one of the TM bits is set.
	 * Inside the TM[], the output means:
	 *   E: Enabled		(bit 32)
	 *   S: Suspended	(bit 33)
	 *   T: Transactional	(bit 34)
	 */
	if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
		printk(",TM[");
		print_bits(val, msr_tm_bits, "");
		printk("]");
	}
}
#else
static void print_tm_bits(unsigned long val) {}
#endif
static void print_msr_bits(unsigned long val)
{
	printk("<");
	print_bits(val, msr_bits, ",");
	print_tm_bits(val);
	printk(">");
}

#ifdef CONFIG_PPC64
#define REG		"%016lx"
#define REGS_PER_LINE	4
#define LAST_VOLATILE	13
#else
#define REG		"%08lx"
#define REGS_PER_LINE	8
#define LAST_VOLATILE	12
#endif
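
/*
 * Worked example (editor's note, not from the original source): with the
 * 64-bit definitions above, the GPR dump in show_regs() prints four
 * 16-hex-digit registers per "GPRnn:" line, while 32-bit kernels fit
 * eight 8-digit registers per line.
 */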
void show_regs(struct pt_regs * regs)
{
	int i, trap;

	show_regs_print_info(KERN_DEFAULT);

	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %p TRAP: %04lx   %s  (%s)\n",
	       regs, regs->trap, print_tainted(), init_utsname()->release);
	printk("MSR: "REG" ", regs->msr);
	print_msr_bits(regs->msr);
	printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
	trap = TRAP(regs);
	if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
		printk("CFAR: "REG" ", regs->orig_gpr3);
	if (trap == 0x200 || trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
		printk("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
#else
		printk("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
#endif
#ifdef CONFIG_PPC64
	printk("SOFTE: %ld ", regs->softe);
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (MSR_TM_ACTIVE(regs->msr))
		printk("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
#endif

	for (i = 0;  i < 32;  i++) {
		if ((i % REGS_PER_LINE) == 0)
			printk("\nGPR%02d: ", i);
		printk(REG " ", regs->gpr[i]);
		if (i == LAST_VOLATILE && !FULL_REGS(regs))
			break;
	}
	printk("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Lookup NIP late so we have the best chance of getting the
	 * above info out without failing
	 */
	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
	if (!user_mode(regs))
		show_instructions(regs);
}
void exit_thread(void)
{
}

void flush_thread(void)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}

void
release_thread(struct task_struct *t)
{
}
/*
 * this gets called so that we can store coprocessor state into memory and
 * copy the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	flush_all_to_thread(src);
	/*
	 * Flush TM state out so we can copy it. __switch_to_tm() does this
	 * flush but it removes the checkpointed state from the current CPU and
	 * transitions the CPU out of TM mode. Hence we need to call
	 * tm_recheckpoint_new_task() (on the same task) to restore the
	 * checkpointed state back and the TM mode.
	 */
	__switch_to_tm(src);
	tm_recheckpoint_new_task(src);

	*dst = *src;

	clear_task_ebb(dst);

	return 0;
}
static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
{
#ifdef CONFIG_PPC_STD_MMU_64
	unsigned long sp_vsid;
	unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
			<< SLB_VSID_SHIFT_1T;
	else
		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
			<< SLB_VSID_SHIFT;
	sp_vsid |= SLB_VSID_KERNEL | llp;
	p->thread.ksp_vsid = sp_vsid;
#endif
}
/*
 * Copy architecture-specific thread state
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long kthread_arg, struct task_struct *p)
{
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	extern void ret_from_kernel_thread(void);
	void (*f)(void);
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
	struct thread_info *ti = task_thread_info(p);

	klp_init_thread_info(ti);

	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
		/* function */
		if (usp)
			childregs->gpr[14] = ppc_function_entry((void *)usp);
#ifdef CONFIG_PPC64
		clear_tsk_thread_flag(p, TIF_32BIT);
		childregs->softe = 1;
#endif
		childregs->gpr[15] = kthread_arg;
		p->thread.regs = NULL;	/* no user register state */
		ti->flags |= _TIF_RESTOREALL;
		f = ret_from_kernel_thread;
	} else {
		/* user thread */
		struct pt_regs *regs = current_pt_regs();
		CHECK_FULL_REGS(regs);
		*childregs = *regs;
		if (usp)
			childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		childregs->gpr[3] = 0;  /* Result from fork() */
		if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
			if (!is_32bit_task())
				childregs->gpr[13] = childregs->gpr[6];
			else
#endif
				childregs->gpr[2] = childregs->gpr[6];
		}

		f = ret_from_fork;
	}
	childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
	sp -= STACK_FRAME_OVERHEAD;

	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork.  The new task will
	 * do some house keeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
	((unsigned long *)sp)[0] = 0;
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
	p->thread.ksp = sp;
#ifdef CONFIG_PPC32
	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
				_ALIGN_UP(sizeof(struct thread_info), 16);
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	p->thread.ptrace_bps[0] = NULL;
#endif

	p->thread.fp_save_area = NULL;
#ifdef CONFIG_ALTIVEC
	p->thread.vr_save_area = NULL;
#endif

	setup_ksp_vsid(p, sp);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		p->thread.dscr_inherit = current->thread.dscr_inherit;
		p->thread.dscr = mfspr(SPRN_DSCR);
	}
	if (cpu_has_feature(CPU_FTR_HAS_PPR))
		p->thread.ppr = INIT_PPR;
#endif
	kregs->nip = ppc_function_entry(f);
	return 0;
}
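
/*
 * Stack layout sketch for the frames built above (editor's note, derived
 * from the code rather than the original source), from high to low:
 *
 *	task_stack_page(p) + THREAD_SIZE
 *	  childregs (struct pt_regs: user regs, or kthread start state)
 *	  STACK_FRAME_OVERHEAD dummy frame, back chain word = 0
 *	  kregs (struct pt_regs: kregs->nip = ret_from_fork or
 *	         ret_from_kernel_thread)
 *	  STACK_FRAME_OVERHEAD frame popped by _switch()
 *	p->thread.ksp  <- bottom of the arrangement
 */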
/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */
#endif

	/*
	 * If we exec out of a kernel thread then thread.regs will not be
	 * set.  Do it now.
	 */
	if (!current->thread.regs) {
		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
		current->thread.regs = regs - 1;
	}

	memset(regs->gpr, 0, sizeof(regs->gpr));
	regs->ctr = 0;
	regs->link = 0;
	regs->xer = 0;
	regs->ccr = 0;
	regs->gpr[1] = sp;

	/*
	 * We have just cleared all the nonvolatile GPRs, so make
	 * FULL_REGS(regs) return true.  This is necessary to allow
	 * ptrace to examine the thread immediately after exec.
	 */
	regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
	regs->mq = 0;
	regs->nip = start;
	regs->msr = MSR_USER;
#else
	if (!is_32bit_task()) {
		unsigned long entry;

		if (is_elf2_task()) {
			/* Look ma, no function descriptors! */
			entry = start;

			/*
			 * The latest iteration of the ABI requires that when
			 * calling a function (at its global entry point),
			 * the caller must ensure r12 holds the entry point
			 * address (so that the function can quickly
			 * establish addressability).
			 */
			regs->gpr[12] = start;
			/* Make sure that's restored on entry to userspace. */
			set_thread_flag(TIF_RESTOREALL);
		} else {
			unsigned long toc;

			/* start is a relocated pointer to the function
			 * descriptor for the elf _start routine.  The first
			 * entry in the function descriptor is the entry
			 * address of _start and the second entry is the TOC
			 * value we need to use.
			 */
			__get_user(entry, (unsigned long __user *)start);
			__get_user(toc, (unsigned long __user *)start+1);

			/* Check whether the e_entry function descriptor entries
			 * need to be relocated before we can use them.
			 */
			if (load_addr != 0) {
				entry += load_addr;
				toc   += load_addr;
			}
			regs->gpr[2] = toc;
		}
		regs->nip = entry;
		regs->msr = MSR_USER64;
	} else {
		regs->nip = start;
		regs->gpr[2] = 0;
		regs->msr = MSR_USER32;
	}
#endif
#ifdef CONFIG_VSX
	current->thread.used_vsr = 0;
#endif
	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
	current->thread.fp_save_area = NULL;
#ifdef CONFIG_ALTIVEC
	memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
	current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
	current->thread.vr_save_area = NULL;
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (cpu_has_feature(CPU_FTR_TM))
		regs->msr |= MSR_TM;
	current->thread.tm_tfhar = 0;
	current->thread.tm_texasr = 0;
	current->thread.tm_tfiar = 0;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
}
EXPORT_SYMBOL(start_thread);
#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
		| PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/* This is a bit hairy.  If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
	 * mode (async, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			/*
			 * When the sticky exception bits are set
			 * directly by userspace, it must call prctl
			 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
			 * in the existing prctl settings) or
			 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
			 * the bits being set).  <fenv.h> functions
			 * saving and restoring the whole
			 * floating-point environment need to do so
			 * anyway to restore the prctl settings from
			 * the saved environment.
			 */
			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
			tsk->thread.fpexc_mode = val &
				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
			return 0;
		} else {
			return -EINVAL;
		}
#else
		return -EINVAL;
#endif
	}

	/* on a CONFIG_SPE this does not hurt us.  The bits that
	 * __pack_fe01 use do not overlap with bits used for
	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
	 * on CONFIG_SPE implementations are reserved so writing to
	 * them does not change anything */
	if (val > PR_FP_EXC_PRECISE)
		return -EINVAL;
	tsk->thread.fpexc_mode = __pack_fe01(val);
	if (regs != NULL && (regs->msr & MSR_FP) != 0)
		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
			| tsk->thread.fpexc_mode;
	return 0;
}
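
/*
 * Illustrative userspace usage (editor's note, not from the original
 * source): a process selects precise FP exception mode with
 *
 *	prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
 *
 * which lands here and packs the mode into MSR[FE0,FE1] via __pack_fe01().
 */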
int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			/*
			 * When the sticky exception bits are set
			 * directly by userspace, it must call prctl
			 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
			 * in the existing prctl settings) or
			 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
			 * the bits being set).  <fenv.h> functions
			 * saving and restoring the whole
			 * floating-point environment need to do so
			 * anyway to restore the prctl settings from
			 * the saved environment.
			 */
			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
			val = tsk->thread.fpexc_mode;
		} else
			return -EINVAL;
#else
		return -EINVAL;
#endif
	else
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	return put_user(val, (unsigned int __user *) adr);
}
int set_endian(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (val == PR_ENDIAN_BIG)
		regs->msr &= ~MSR_LE;
	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
		regs->msr |= MSR_LE;
	else
		return -EINVAL;

	return 0;
}

int get_endian(struct task_struct *tsk, unsigned long adr)
{
	struct pt_regs *regs = tsk->thread.regs;
	unsigned int val;

	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
	    !cpu_has_feature(CPU_FTR_REAL_LE))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (regs->msr & MSR_LE) {
		if (cpu_has_feature(CPU_FTR_REAL_LE))
			val = PR_ENDIAN_LITTLE;
		else
			val = PR_ENDIAN_PPC_LITTLE;
	} else
		val = PR_ENDIAN_BIG;

	return put_user(val, (unsigned int __user *)adr);
}
int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	tsk->thread.align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}
static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
				  unsigned long nbytes)
{
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	/*
	 * Avoid crashing if the stack has overflowed and corrupted
	 * task_cpu(p), which is in the thread_info struct.
	 */
	if (cpu < NR_CPUS && cpu_possible(cpu)) {
		stack_page = (unsigned long) hardirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;

		stack_page = (unsigned long) softirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;
	}
	return 0;
}

int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)task_stack_page(p);

	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return valid_irq_stack(sp, p, nbytes);
}

EXPORT_SYMBOL(validate_sp);
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
		return 0;

	do {
		sp = *(unsigned long *)sp;
		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
			return 0;
		if (count > 0) {
			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}
static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	unsigned long sp, ip, lr, newsp;
	int count = 0;
	int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int curr_frame = current->curr_ret_stack;
	extern void return_to_handler(void);
	unsigned long rth = (unsigned long)return_to_handler;
#endif

	sp = (unsigned long) stack;
	if (tsk == NULL)
		tsk = current;
	if (sp == 0) {
		if (tsk == current)
			sp = current_stack_pointer();
		else
			sp = tsk->thread.ksp;
	}

	lr = 0;
	printk("Call Trace:\n");
	do {
		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

		stack = (unsigned long *) sp;
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe || ip != lr) {
			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
			if ((ip == rth) && curr_frame >= 0) {
				printk(" (%pS)",
				       (void *)current->ret_stack[curr_frame].ret);
				curr_frame--;
			}
#endif
			if (firstframe)
				printk(" (unreliable)");
			printk("\n");
		}
		firstframe = 0;

		/*
		 * See if this is an exception frame.
		 * We look for the "regshere" marker in the current frame.
		 */
		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			struct pt_regs *regs = (struct pt_regs *)
				(sp + STACK_FRAME_OVERHEAD);
			lr = regs->link;
			printk("--- interrupt: %lx at %pS\n    LR = %pS\n",
			       regs->trap, (void *)regs->nip, (void *)lr);
			firstframe = 1;
		}

		sp = newsp;
	} while (count++ < kstack_depth_to_print);
}
#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
void notrace __ppc64_runlatch_on(void)
{
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl |= CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);

	ti->local_flags |= _TLF_RUNLATCH;
}

/* Called with hard IRQs off */
void notrace __ppc64_runlatch_off(void)
{
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ti->local_flags &= ~_TLF_RUNLATCH;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl &= ~CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);
}
#endif /* CONFIG_PPC64 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}

static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = 0;

	/* 8MB for 32bit, 1GB for 64bit */
	if (is_32bit_task())
		rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
	else
		rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));

	return rnd << PAGE_SHIFT;
}
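
/*
 * Worked example (editor's note, not from the original source): with 4KB
 * pages (PAGE_SHIFT = 12), a 32-bit task draws rnd from [0, 2^11) pages,
 * i.e. up to 2^11 * 4KB = 8MB of brk randomisation; a 64-bit task gets up
 * to 2^18 pages = 1GB, matching the comment above.
 */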
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

#ifdef CONFIG_PPC_STD_MMU_64
	/*
	 * If we are using 1TB segments and we are allowed to randomise
	 * the heap, we can put it above 1TB so it is backed by a 1TB
	 * segment. Otherwise the heap will be in the bottom 1TB
	 * which always uses 256MB segments and this may result in a
	 * performance penalty.
	 */
	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

	ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;

	return ret;
}