/*
 *  Derived from "arch/i386/kernel/process.c"
 *    Copyright (C) 1995  Linus Torvalds
 *
 *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 *  Paul Mackerras (paulus@cs.anu.edu.au)
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/uaccess.h>

#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/runlatch.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#include <asm/code-patching.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>

/* Transactional Memory debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while (0)
#endif

extern unsigned long _get_SP(void);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static void check_if_tm_restore_required(struct task_struct *tsk)
{
	/*
	 * If we are saving the current thread's registers, and the
	 * thread is in a transactional state, set the TIF_RESTORE_TM
	 * bit so that we know to restore the registers before
	 * returning to userspace.
	 */
	if (tsk == current && tsk->thread.regs &&
	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
	    !test_thread_flag(TIF_RESTORE_TM)) {
		tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
		set_thread_flag(TIF_RESTORE_TM);
	}
}
#else
static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

bool strict_msr_control;
EXPORT_SYMBOL(strict_msr_control);

static int __init enable_strict_msr_control(char *str)
{
	strict_msr_control = true;
	pr_info("Enabling strict facility control\n");

	return 0;
}
early_param("ppc_strict_facility_enable", enable_strict_msr_control);

void msr_check_and_set(unsigned long bits)
{
	unsigned long oldmsr = mfmsr();
	unsigned long newmsr;

	newmsr = oldmsr | bits;

#ifdef CONFIG_VSX
	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
		newmsr |= MSR_VSX;
#endif

	if (oldmsr != newmsr)
		mtmsr_isync(newmsr);
}

void __msr_check_and_clear(unsigned long bits)
{
	unsigned long oldmsr = mfmsr();
	unsigned long newmsr;

	newmsr = oldmsr & ~bits;

#ifdef CONFIG_VSX
	if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
		newmsr &= ~MSR_VSX;
#endif

	if (oldmsr != newmsr)
		mtmsr_isync(newmsr);
}
EXPORT_SYMBOL(__msr_check_and_clear);

#ifdef CONFIG_PPC_FPU
void __giveup_fpu(struct task_struct *tsk)
{
	save_fpu(tsk);
	tsk->thread.regs->msr &= ~MSR_FP;
#ifdef CONFIG_VSX
	if (cpu_has_feature(CPU_FTR_VSX))
		tsk->thread.regs->msr &= ~MSR_VSX;
#endif
}

void giveup_fpu(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_FP);
	__giveup_fpu(tsk);
	msr_check_and_clear(MSR_FP);
}
EXPORT_SYMBOL(giveup_fpu);

/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
			giveup_fpu(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);

void enable_kernel_fp(void)
{
	WARN_ON(preemptible());

	msr_check_and_set(MSR_FP);

	if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
		check_if_tm_restore_required(current);
		__giveup_fpu(current);
	}
}
EXPORT_SYMBOL(enable_kernel_fp);
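
/*
 * Usage sketch (editorial illustration, not part of the original file):
 * in-kernel FP use must stay pinned to one CPU, since enable_kernel_fp()
 * only grabs the facility for the current CPU.  do_fp_work() below is a
 * hypothetical caller, not an existing kernel function.
 *
 *	static void do_fp_work(void)
 *	{
 *		preempt_disable();
 *		enable_kernel_fp();
 *		// ... FP instructions may be used here ...
 *		preempt_enable();
 *	}
 *
 * enable_kernel_altivec(), enable_kernel_vsx() and enable_kernel_spe()
 * below follow the same pattern for their respective facilities.
 */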

static int restore_fp(struct task_struct *tsk)
{
	if (tsk->thread.load_fp) {
		load_fp_state(&current->thread.fp_state);
		current->thread.load_fp++;
		return 1;
	}
	return 0;
}
#else
static int restore_fp(struct task_struct *tsk) { return 0; }
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_ALTIVEC
#define loadvec(thr) ((thr).load_vec)

static void __giveup_altivec(struct task_struct *tsk)
{
	save_altivec(tsk);
	tsk->thread.regs->msr &= ~MSR_VEC;
#ifdef CONFIG_VSX
	if (cpu_has_feature(CPU_FTR_VSX))
		tsk->thread.regs->msr &= ~MSR_VSX;
#endif
}

void giveup_altivec(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_VEC);
	__giveup_altivec(tsk);
	msr_check_and_clear(MSR_VEC);
}
EXPORT_SYMBOL(giveup_altivec);

void enable_kernel_altivec(void)
{
	WARN_ON(preemptible());

	msr_check_and_set(MSR_VEC);

	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
		check_if_tm_restore_required(current);
		__giveup_altivec(current);
	}
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
			BUG_ON(tsk != current);
			giveup_altivec(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);

static int restore_altivec(struct task_struct *tsk)
{
	if (cpu_has_feature(CPU_FTR_ALTIVEC) && tsk->thread.load_vec) {
		load_vr_state(&tsk->thread.vr_state);
		tsk->thread.used_vr = 1;
		tsk->thread.load_vec++;

		return 1;
	}
	return 0;
}
#else
#define loadvec(thr) 0
static inline int restore_altivec(struct task_struct *tsk) { return 0; }
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
static void __giveup_vsx(struct task_struct *tsk)
{
	if (tsk->thread.regs->msr & MSR_FP)
		__giveup_fpu(tsk);
	if (tsk->thread.regs->msr & MSR_VEC)
		__giveup_altivec(tsk);
	tsk->thread.regs->msr &= ~MSR_VSX;
}

static void giveup_vsx(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
	__giveup_vsx(tsk);
	msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
}

static void save_vsx(struct task_struct *tsk)
{
	if (tsk->thread.regs->msr & MSR_FP)
		save_fpu(tsk);
	if (tsk->thread.regs->msr & MSR_VEC)
		save_altivec(tsk);
}

void enable_kernel_vsx(void)
{
	WARN_ON(preemptible());

	msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);

	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
		check_if_tm_restore_required(current);
		if (current->thread.regs->msr & MSR_FP)
			__giveup_fpu(current);
		if (current->thread.regs->msr & MSR_VEC)
			__giveup_altivec(current);
		__giveup_vsx(current);
	}
}
EXPORT_SYMBOL(enable_kernel_vsx);

void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VSX) {
			BUG_ON(tsk != current);
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);

static int restore_vsx(struct task_struct *tsk)
{
	if (cpu_has_feature(CPU_FTR_VSX)) {
		tsk->thread.used_vsr = 1;
		return 1;
	}

	return 0;
}
#else
static inline int restore_vsx(struct task_struct *tsk) { return 0; }
static inline void save_vsx(struct task_struct *tsk) { }
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
void giveup_spe(struct task_struct *tsk)
{
	check_if_tm_restore_required(tsk);

	msr_check_and_set(MSR_SPE);
	__giveup_spe(tsk);
	msr_check_and_clear(MSR_SPE);
}
EXPORT_SYMBOL(giveup_spe);

void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

	msr_check_and_set(MSR_SPE);

	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
		check_if_tm_restore_required(current);
		__giveup_spe(current);
	}
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
			BUG_ON(tsk != current);
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_SPE */

static unsigned long msr_all_available;

static int __init init_msr_all_available(void)
{
#ifdef CONFIG_PPC_FPU
	msr_all_available |= MSR_FP;
#endif
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		msr_all_available |= MSR_VEC;
#endif
#ifdef CONFIG_VSX
	if (cpu_has_feature(CPU_FTR_VSX))
		msr_all_available |= MSR_VSX;
#endif
#ifdef CONFIG_SPE
	if (cpu_has_feature(CPU_FTR_SPE))
		msr_all_available |= MSR_SPE;
#endif

	return 0;
}
early_initcall(init_msr_all_available);

void giveup_all(struct task_struct *tsk)
{
	unsigned long usermsr;

	if (!tsk->thread.regs)
		return;

	usermsr = tsk->thread.regs->msr;

	if ((usermsr & msr_all_available) == 0)
		return;

	msr_check_and_set(msr_all_available);

#ifdef CONFIG_PPC_FPU
	if (usermsr & MSR_FP)
		__giveup_fpu(tsk);
#endif
#ifdef CONFIG_ALTIVEC
	if (usermsr & MSR_VEC)
		__giveup_altivec(tsk);
#endif
#ifdef CONFIG_VSX
	if (usermsr & MSR_VSX)
		__giveup_vsx(tsk);
#endif
#ifdef CONFIG_SPE
	if (usermsr & MSR_SPE)
		__giveup_spe(tsk);
#endif

	msr_check_and_clear(msr_all_available);
}
EXPORT_SYMBOL(giveup_all);

void restore_math(struct pt_regs *regs)
{
	unsigned long msr;

	if (!current->thread.load_fp && !loadvec(current->thread))
		return;

	msr = regs->msr;
	msr_check_and_set(msr_all_available);

	/*
	 * Only reload if the bit is not set in the user MSR: the bit
	 * being set indicates that the registers are already hot.
	 */
	if ((!(msr & MSR_FP)) && restore_fp(current))
		msr |= MSR_FP | current->thread.fpexc_mode;

	if ((!(msr & MSR_VEC)) && restore_altivec(current))
		msr |= MSR_VEC;

	if ((msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC) &&
			restore_vsx(current)) {
		msr |= MSR_VSX;
	}

	msr_check_and_clear(msr_all_available);

	regs->msr = msr;
}

void save_all(struct task_struct *tsk)
{
	unsigned long usermsr;

	if (!tsk->thread.regs)
		return;

	usermsr = tsk->thread.regs->msr;

	if ((usermsr & msr_all_available) == 0)
		return;

	msr_check_and_set(msr_all_available);

	/*
	 * Given the way the register space is laid out in hardware,
	 * save_vsx() boils down to a save_fpu() plus a save_altivec().
	 */
	if (usermsr & MSR_VSX) {
		save_vsx(tsk);
	} else {
		if (usermsr & MSR_FP)
			save_fpu(tsk);

		if (usermsr & MSR_VEC)
			save_altivec(tsk);
	}

	if (usermsr & MSR_SPE)
		__giveup_spe(tsk);

	msr_check_and_clear(msr_all_available);
}

void flush_all_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		BUG_ON(tsk != current);
		save_all(tsk);

#ifdef CONFIG_SPE
		if (tsk->thread.regs->msr & MSR_SPE)
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
#endif

		preempt_enable();
	}
}
EXPORT_SYMBOL(flush_all_to_thread);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code, int signal_code, int breakpt)
{
	siginfo_t info;

	current->thread.trap_nr = signal_code;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = breakpt;	/* breakpoint or watchpoint id */
	info.si_code = signal_code;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_break(struct pt_regs *regs, unsigned long address,
	      unsigned long error_code)
{
	siginfo_t info;

	current->thread.trap_nr = TRAP_HWBKPT;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_break_match(regs))
		return;

	/* Clear the breakpoint */
	hw_breakpoint_disable();

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->debug.iac1 = thread->debug.iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	thread->debug.iac3 = thread->debug.iac4 = 0;
#endif
	thread->debug.dac1 = thread->debug.dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	thread->debug.dvc1 = thread->debug.dvc2 = 0;
#endif
	thread->debug.dbcr0 = 0;
#ifdef CONFIG_BOOKE
	/*
	 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
	 */
	thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
			DBCR1_IAC3US | DBCR1_IAC4US;
	/*
	 * Force Data Address Compare User/Supervisor bits to be User-only
	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
	 */
	thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
	thread->debug.dbcr1 = 0;
#endif
}

static void prime_debug_regs(struct debug_reg *debug)
{
	/*
	 * We could have inherited MSR_DE from userspace, since
	 * it doesn't get cleared on exception entry.  Make sure
	 * MSR_DE is clear before we enable any debug events.
	 */
	mtmsr(mfmsr() & ~MSR_DE);

	mtspr(SPRN_IAC1, debug->iac1);
	mtspr(SPRN_IAC2, debug->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	mtspr(SPRN_IAC3, debug->iac3);
	mtspr(SPRN_IAC4, debug->iac4);
#endif
	mtspr(SPRN_DAC1, debug->dac1);
	mtspr(SPRN_DAC2, debug->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	mtspr(SPRN_DVC1, debug->dvc1);
	mtspr(SPRN_DVC2, debug->dvc2);
#endif
	mtspr(SPRN_DBCR0, debug->dbcr0);
	mtspr(SPRN_DBCR1, debug->dbcr1);
#ifdef CONFIG_BOOKE
	mtspr(SPRN_DBCR2, debug->dbcr2);
#endif
}
/*
 * Unless neither the old nor the new thread is making use of the
 * debug registers, set the debug registers from the values
 * stored in the new thread.
 */
void switch_booke_debug_regs(struct debug_reg *new_debug)
{
	if ((current->thread.debug.dbcr0 & DBCR0_IDM)
		|| (new_debug->dbcr0 & DBCR0_IDM))
			prime_debug_regs(new_debug);
}
EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->hw_brk.address = 0;
	thread->hw_brk.type = 0;
	set_breakpoint(&thread->hw_brk);
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
	isync();
#endif
	return 0;
}
#elif defined(CONFIG_PPC_BOOK3S)
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DABR, dabr);
	if (cpu_has_feature(CPU_FTR_DABRX))
		mtspr(SPRN_DABRX, dabrx);
	return 0;
}
#else
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	return -EINVAL;
}
#endif

static inline int set_dabr(struct arch_hw_breakpoint *brk)
{
	unsigned long dabr, dabrx;

	dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
	dabrx = ((brk->type >> 3) & 0x7);

	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr, dabrx);

	return __set_dabr(dabr, dabrx);
}

static inline int set_dawr(struct arch_hw_breakpoint *brk)
{
	unsigned long dawr, dawrx, mrd;

	dawr = brk->address;

	dawrx  = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE))
			<< (63 - 58);	/* read/write bits */
	dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2)
			<< (63 - 59);	/* translate */
	dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL))
			>> 3;		/* PRIM bits */
	/*
	 * The DAWR length is stored in field MDR, bits 48:53.  It matches
	 * the range in doublewords (64 bits), biased by -1, e.g.
	 * 0b000000 = 1 DW and 0b111111 = 64 DW.
	 * brk->len is in bytes.
	 * This aligns up to double word size, shifts and does the bias.
	 */
	mrd = ((brk->len + 7) >> 3) - 1;
	dawrx |= (mrd & 0x3f) << (63 - 53);

	if (ppc_md.set_dawr)
		return ppc_md.set_dawr(dawr, dawrx);
	mtspr(SPRN_DAWR, dawr);
	mtspr(SPRN_DAWRX, dawrx);
	return 0;
}
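
/*
 * Worked example of the length encoding above (editorial note): for an
 * 8-byte watchpoint, mrd = ((8 + 7) >> 3) - 1 = 0, i.e. MDR encodes
 * "1 doubleword"; for brk->len = 9..16 bytes, mrd = 1 ("2 doublewords"),
 * and so on up to mrd = 63 for a 512-byte (64 DW) range.
 */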

void __set_breakpoint(struct arch_hw_breakpoint *brk)
{
	memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));

	if (cpu_has_feature(CPU_FTR_DAWR))
		set_dawr(brk);
	else
		set_dabr(brk);
}

void set_breakpoint(struct arch_hw_breakpoint *brk)
{
	preempt_disable();
	__set_breakpoint(brk);
	preempt_enable();
}

#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif

static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
				struct arch_hw_breakpoint *b)
{
	if (a->address != b->address)
		return false;
	if (a->type != b->type)
		return false;
	if (a->len != b->len)
		return false;
	return true;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static void tm_reclaim_thread(struct thread_struct *thr,
			      struct thread_info *ti, uint8_t cause)
{
	unsigned long msr_diff = 0;

	/*
	 * If FP/VSX registers have been already saved to the
	 * thread_struct, move them to the transact_fp array.
	 * We clear the TIF_RESTORE_TM bit since after the reclaim
	 * the thread will no longer be transactional.
	 */
	if (test_ti_thread_flag(ti, TIF_RESTORE_TM)) {
		msr_diff = thr->ckpt_regs.msr & ~thr->regs->msr;
		if (msr_diff & MSR_FP)
			memcpy(&thr->transact_fp, &thr->fp_state,
			       sizeof(struct thread_fp_state));
		if (msr_diff & MSR_VEC)
			memcpy(&thr->transact_vr, &thr->vr_state,
			       sizeof(struct thread_vr_state));
		clear_ti_thread_flag(ti, TIF_RESTORE_TM);
		msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1;
	}

	/*
	 * Use the current MSR TM suspended bit to track if we have
	 * checkpointed state outstanding.
	 * On signal delivery, we'd normally reclaim the checkpointed
	 * state to obtain the stack pointer (see get_tm_stackpointer()).
	 * This will then directly return to userspace without going
	 * through __switch_to().  However, if the stack frame is bad,
	 * we need to exit this thread, which calls __switch_to(), which
	 * will again attempt to reclaim the already saved tm state.
	 * Hence we need to check that we've not already reclaimed
	 * this state.
	 * We do this using the current MSR, rather than tracking it in
	 * some specific thread_struct bit, as it has the additional
	 * benefit of checking for a potential TM bad thing exception.
	 */
	if (!MSR_TM_SUSPENDED(mfmsr()))
		return;

	tm_reclaim(thr, thr->regs->msr, cause);

	/*
	 * Having done the reclaim, we now have the checkpointed
	 * FP/VSX values in the registers.  These might be valid
	 * even if we have previously called enable_kernel_fp() or
	 * flush_fp_to_thread(), so update thr->regs->msr to
	 * indicate their current validity.
	 */
	thr->regs->msr |= msr_diff;
}

void tm_reclaim_current(uint8_t cause)
{
	tm_enable();
	tm_reclaim_thread(&current->thread, current_thread_info(), cause);
}

static inline void tm_reclaim_task(struct task_struct *tsk)
{
	/*
	 * We have to work out if we're switching from/to a task that's in the
	 * middle of a transaction.
	 *
	 * In switching we need to maintain a 2nd register state as
	 * oldtask->thread.ckpt_regs.  We tm_reclaim(oldproc); this saves the
	 * checkpointed (tbegin) state in ckpt_regs and saves the transactional
	 * (current) FPRs into oldtask->thread.transact_fpr[].
	 *
	 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
	 */
	struct thread_struct *thr = &tsk->thread;

	if (!thr->regs)
		return;

	if (!MSR_TM_ACTIVE(thr->regs->msr))
		goto out_and_saveregs;

	/*
	 * Stash the original thread MSR, as giveup_fpu et al will
	 * modify it.  We hold onto it to see whether the task used
	 * FP & vector regs.  If the TIF_RESTORE_TM flag is set,
	 * ckpt_regs.msr is already set.
	 */
	if (!test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_TM))
		thr->ckpt_regs.msr = thr->regs->msr;

	TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
		 "ccr=%lx, msr=%lx, trap=%lx)\n",
		 tsk->pid, thr->regs->nip,
		 thr->regs->ccr, thr->regs->msr,
		 thr->regs->trap);

	tm_reclaim_thread(thr, task_thread_info(tsk), TM_CAUSE_RESCHED);

	TM_DEBUG("--- tm_reclaim on pid %d complete\n",
		 tsk->pid);

out_and_saveregs:
	/*
	 * Always save the regs here, even if a transaction's not active.
	 * This context-switches a thread's TM info SPRs.  We do it here to
	 * be consistent with the restore path (in recheckpoint) which
	 * cannot happen later in _switch().
	 */
	tm_save_sprs(thr);
}

extern void __tm_recheckpoint(struct thread_struct *thread,
			      unsigned long orig_msr);

void tm_recheckpoint(struct thread_struct *thread,
		     unsigned long orig_msr)
{
	unsigned long flags;

	/*
	 * We really can't be interrupted here as the TEXASR registers can't
	 * change and later in the trecheckpoint code, we have a userspace R1.
	 * So let's hard disable over this region.
	 */
	local_irq_save(flags);
	hard_irq_disable();

	/*
	 * The TM SPRs are restored here, so that TEXASR.FS can be set
	 * before the trecheckpoint and no explosion occurs.
	 */
	tm_restore_sprs(thread);

	__tm_recheckpoint(thread, orig_msr);

	local_irq_restore(flags);
}

static inline void tm_recheckpoint_new_task(struct task_struct *new)
{
	unsigned long msr;

	if (!cpu_has_feature(CPU_FTR_TM))
		return;

	/*
	 * Recheckpoint the registers of the thread we're about to switch to.
	 *
	 * If the task was using FP, we non-lazily reload both the original and
	 * the speculative FP register states.  This is because the kernel
	 * doesn't see if/when a TM rollback occurs, so if we take an FP
	 * unavailable exception later, we are unable to determine which set
	 * of FP regs needs to be restored.
	 */
	if (!new->thread.regs)
		return;

	if (!MSR_TM_ACTIVE(new->thread.regs->msr)) {
		tm_restore_sprs(&new->thread);
		return;
	}
	msr = new->thread.ckpt_regs.msr;
	/* Recheckpoint to restore original checkpointed register state. */
	TM_DEBUG("*** tm_recheckpoint of pid %d "
		 "(new->msr 0x%lx, new->origmsr 0x%lx)\n",
		 new->pid, new->thread.regs->msr, msr);

	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&new->thread, msr);

	/* This loads the speculative FP/VEC state, if used */
	if (msr & MSR_FP) {
		do_load_up_transact_fpu(&new->thread);
		new->thread.regs->msr |=
			(MSR_FP | new->thread.fpexc_mode);
	}
#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		do_load_up_transact_altivec(&new->thread);
		new->thread.regs->msr |= MSR_VEC;
	}
#endif
	/* We may as well turn on VSX too since all the state is restored now */
	if (msr & MSR_VSX)
		new->thread.regs->msr |= MSR_VSX;

	TM_DEBUG("*** tm_recheckpoint of pid %d complete "
		 "(kernel msr 0x%lx)\n",
		 new->pid, mfmsr());
}

static inline void __switch_to_tm(struct task_struct *prev)
{
	if (cpu_has_feature(CPU_FTR_TM)) {
		tm_enable();
		tm_reclaim_task(prev);
	}
}

/*
 * This is called if we are on the way out to userspace and the
 * TIF_RESTORE_TM flag is set.  It checks if we need to reload
 * FP and/or vector state and does so if necessary.
 * If userspace is inside a transaction (whether active or
 * suspended) and FP/VMX/VSX instructions have ever been enabled
 * inside that transaction, then we have to keep them enabled
 * and keep the FP/VMX/VSX state loaded while ever the transaction
 * continues.  The reason is that if we didn't, and subsequently
 * got a FP/VMX/VSX unavailable interrupt inside a transaction,
 * we don't know whether it's the same transaction, and thus we
 * don't know which of the checkpointed state and the transactional
 * state to use.
 */
void restore_tm_state(struct pt_regs *regs)
{
	unsigned long msr_diff;

	clear_thread_flag(TIF_RESTORE_TM);
	if (!MSR_TM_ACTIVE(regs->msr))
		return;

	msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
	msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;

	restore_math(regs);

	regs->msr |= msr_diff;
}

#else
#define tm_recheckpoint_new_task(new)
#define __switch_to_tm(prev)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
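
/*
 * Editorial summary of the TM flow above (not from the original file):
 * on switch-out, __switch_to_tm(prev) reclaims any live transactional
 * state into prev->thread (ckpt_regs plus transact_{fp,vr}) and saves
 * the TM SPRs; on switch-in, tm_recheckpoint_new_task(new) restores the
 * TM SPRs and, if new was mid-transaction, recheckpoints the saved
 * state so the transaction can roll back correctly after the context
 * switch.
 */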

static inline void save_sprs(struct thread_struct *t)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		t->vrsave = mfspr(SPRN_VRSAVE);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_DSCR))
		t->dscr = mfspr(SPRN_DSCR);

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		t->bescr = mfspr(SPRN_BESCR);
		t->ebbhr = mfspr(SPRN_EBBHR);
		t->ebbrr = mfspr(SPRN_EBBRR);

		t->fscr = mfspr(SPRN_FSCR);

		/*
		 * Note that the TAR is not available for use in the kernel.
		 * (To provide this, the TAR should be backed up/restored on
		 * exception entry/exit instead, and be in pt_regs.  FIXME,
		 * this should be in pt_regs anyway (for debug).)
		 */
		t->tar = mfspr(SPRN_TAR);
	}
#endif
}

static inline void restore_sprs(struct thread_struct *old_thread,
				struct thread_struct *new_thread)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
	    old_thread->vrsave != new_thread->vrsave)
		mtspr(SPRN_VRSAVE, new_thread->vrsave);
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		u64 dscr = get_paca()->dscr_default;
		u64 fscr = old_thread->fscr & ~FSCR_DSCR;

		if (new_thread->dscr_inherit) {
			dscr = new_thread->dscr;
			fscr |= FSCR_DSCR;
		}

		if (old_thread->dscr != dscr)
			mtspr(SPRN_DSCR, dscr);

		if (old_thread->fscr != fscr)
			mtspr(SPRN_FSCR, fscr);
	}

	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		if (old_thread->bescr != new_thread->bescr)
			mtspr(SPRN_BESCR, new_thread->bescr);
		if (old_thread->ebbhr != new_thread->ebbhr)
			mtspr(SPRN_EBBHR, new_thread->ebbhr);
		if (old_thread->ebbrr != new_thread->ebbrr)
			mtspr(SPRN_EBBRR, new_thread->ebbrr);

		if (old_thread->tar != new_thread->tar)
			mtspr(SPRN_TAR, new_thread->tar);
	}
#endif
}

struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	struct task_struct *last;
#ifdef CONFIG_PPC_BOOK3S_64
	struct ppc64_tlb_batch *batch;
#endif

	new_thread = &new->thread;
	old_thread = &current->thread;

	WARN_ON(!irqs_disabled());

#ifdef CONFIG_PPC64
	/*
	 * Collect processor utilization data per process
	 */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
		long unsigned start_tb, current_tb;
		start_tb = old_thread->start_tb;
		cu->current_tb = current_tb = mfspr(SPRN_PURR);
		old_thread->accum_tb += (current_tb - start_tb);
		new_thread->start_tb = current_tb;
	}
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_BOOK3S_64
	batch = this_cpu_ptr(&ppc64_tlb_batch);
	if (batch->active) {
		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
		if (batch->index)
			__flush_tlb_pending(batch);
		batch->active = 0;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	switch_booke_debug_regs(&new->thread.debug);
#else
/*
 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
 * schedule DABR
 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
	if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
		__set_breakpoint(&new->thread.hw_brk);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif

	/*
	 * We need to save SPRs before treclaim/trecheckpoint as these will
	 * change a number of them.
	 */
	save_sprs(&prev->thread);

	__switch_to_tm(prev);

	/* Save FPU, Altivec, VSX and SPE state */
	giveup_all(prev);

	/*
	 * We can't take a PMU exception inside _switch() since there is a
	 * window where the kernel stack SLB and the kernel stack are out
	 * of sync.  Hard disable here.
	 */
	hard_irq_disable();

	tm_recheckpoint_new_task(new);

	/*
	 * Call restore_sprs() before calling _switch().  If we move it after
	 * _switch() then we miss out on calling it for new tasks.  The reason
	 * for this is we manually create a stack frame for new tasks that
	 * directly returns through ret_from_fork() or
	 * ret_from_kernel_thread().  See copy_thread() for details.
	 */
	restore_sprs(old_thread, new_thread);

	last = _switch(old_thread, new_thread);

#ifdef CONFIG_PPC_BOOK3S_64
	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
		batch = this_cpu_ptr(&ppc64_tlb_batch);
		batch->active = 1;
	}

	if (current_thread_info()->task->thread.regs)
		restore_math(current_thread_info()->task->thread.regs);

#endif /* CONFIG_PPC_BOOK3S_64 */

	return last;
}

static int instructions_to_print = 16;

static void show_instructions(struct pt_regs *regs)
{
	int i;
	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
			sizeof(int));

	printk("Instruction dump:");

	for (i = 0; i < instructions_to_print; i++) {
		int instr;

		if (!(i % 8))
			printk("\n");

#if !defined(CONFIG_BOOKE)
		/*
		 * If executing with the IMMU off, adjust pc rather
		 * than print XXXXXXXX.
		 */
		if (!(regs->msr & MSR_IR))
			pc = (unsigned long)phys_to_virt(pc);
#endif

		if (!__kernel_text_address(pc) ||
		    probe_kernel_address((unsigned int __user *)pc, instr)) {
			printk(KERN_CONT "XXXXXXXX ");
		} else {
			if (regs->nip == pc)
				printk(KERN_CONT "<%08x> ", instr);
			else
				printk(KERN_CONT "%08x ", instr);
		}

		pc += sizeof(int);
	}

	printk("\n");
}

struct regbit {
	unsigned long bit;
	const char *name;
};

static struct regbit msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
	{MSR_SF,	"SF"},
	{MSR_HV,	"HV"},
#endif
	{MSR_VEC,	"VEC"},
	{MSR_VSX,	"VSX"},
#ifdef CONFIG_BOOKE
	{MSR_CE,	"CE"},
#endif
	{MSR_EE,	"EE"},
	{MSR_PR,	"PR"},
	{MSR_FP,	"FP"},
	{MSR_ME,	"ME"},
#ifdef CONFIG_BOOKE
	{MSR_DE,	"DE"},
#else
	{MSR_SE,	"SE"},
	{MSR_BE,	"BE"},
#endif
	{MSR_IR,	"IR"},
	{MSR_DR,	"DR"},
	{MSR_PMM,	"PMM"},
#ifndef CONFIG_BOOKE
	{MSR_RI,	"RI"},
	{MSR_LE,	"LE"},
#endif
	{0,		NULL}
};

static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
{
	const char *s = "";

	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			printk("%s%s", s, bits->name);
			s = sep;
		}
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static struct regbit msr_tm_bits[] = {
	{MSR_TS_T,	"T"},
	{MSR_TS_S,	"S"},
	{MSR_TM,	"E"},
	{0,		NULL}
};

static void print_tm_bits(unsigned long val)
{
/*
 * This only prints something if at least one of the TM bits is set.
 * Inside the TM[], the output means:
 *   E: Enabled		(bit 32)
 *   S: Suspended	(bit 33)
 *   T: Transactional	(bit 34)
 */
	if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
		printk(",TM[");
		print_bits(val, msr_tm_bits, "");
		printk("]");
	}
}
#else
static void print_tm_bits(unsigned long val) {}
#endif

static void print_msr_bits(unsigned long val)
{
	printk("<");
	print_bits(val, msr_bits, ",");
	print_tm_bits(val);
	printk(">");
}

#ifdef CONFIG_PPC64
#define REG		"%016lx"
#define REGS_PER_LINE	4
#define LAST_VOLATILE	13
#else
#define REG		"%08lx"
#define REGS_PER_LINE	8
#define LAST_VOLATILE	12
#endif

void show_regs(struct pt_regs * regs)
{
	int i, trap;

	show_regs_print_info(KERN_DEFAULT);

	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %p TRAP: %04lx   %s  (%s)\n",
	       regs, regs->trap, print_tainted(), init_utsname()->release);
	printk("MSR: "REG" ", regs->msr);
	print_msr_bits(regs->msr);
	printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
	trap = TRAP(regs);
	if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
		printk("CFAR: "REG" ", regs->orig_gpr3);
	if (trap == 0x200 || trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
		printk("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
#else
		printk("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
#endif
#ifdef CONFIG_PPC64
	printk("SOFTE: %ld ", regs->softe);
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (MSR_TM_ACTIVE(regs->msr))
		printk("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
#endif

	for (i = 0; i < 32; i++) {
		if ((i % REGS_PER_LINE) == 0)
			printk("\nGPR%02d: ", i);
		printk(REG " ", regs->gpr[i]);
		if (i == LAST_VOLATILE && !FULL_REGS(regs))
			break;
	}
	printk("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Lookup NIP late so we have the best chance of getting the
	 * above info out without failing
	 */
	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
	if (!user_mode(regs))
		show_instructions(regs);
}

void exit_thread(void)
{
}

void flush_thread(void)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}

void
release_thread(struct task_struct *t)
{
}

/*
 * This gets called so that we can store coprocessor state into memory and
 * copy the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	flush_all_to_thread(src);
	/*
	 * Flush TM state out so we can copy it.  __switch_to_tm() does this
	 * flush but it removes the checkpointed state from the current CPU and
	 * transitions the CPU out of TM mode.  Hence we need to call
	 * tm_recheckpoint_new_task() (on the same task) to restore the
	 * checkpointed state back and the TM mode.
	 */
	__switch_to_tm(src);
	tm_recheckpoint_new_task(src);

	*dst = *src;

	clear_task_ebb(dst);

	return 0;
}

static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
{
#ifdef CONFIG_PPC_STD_MMU_64
	unsigned long sp_vsid;
	unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
			<< SLB_VSID_SHIFT_1T;
	else
		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
			<< SLB_VSID_SHIFT;
	sp_vsid |= SLB_VSID_KERNEL | llp;
	p->thread.ksp_vsid = sp_vsid;
#endif
}

/*
 * Copy a thread..
 */

/*
 * Copy architecture-specific thread state
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long kthread_arg, struct task_struct *p)
{
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	extern void ret_from_kernel_thread(void);
	void (*f)(void);
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		struct thread_info *ti = (void *)task_stack_page(p);
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
		/* function */
		if (usp)
			childregs->gpr[14] = ppc_function_entry((void *)usp);
#ifdef CONFIG_PPC64
		clear_tsk_thread_flag(p, TIF_32BIT);
		childregs->softe = 1;
#endif
		childregs->gpr[15] = kthread_arg;
		p->thread.regs = NULL;	/* no user register state */
		ti->flags |= _TIF_RESTOREALL;
		f = ret_from_kernel_thread;
	} else {
		/* user thread */
		struct pt_regs *regs = current_pt_regs();
		CHECK_FULL_REGS(regs);
		*childregs = *regs;
		if (usp)
			childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		childregs->gpr[3] = 0;	/* Result from fork() */
		if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
			if (!is_32bit_task())
				childregs->gpr[13] = childregs->gpr[6];
			else
#endif
				childregs->gpr[2] = childregs->gpr[6];
		}

		f = ret_from_fork;
	}
	childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
	sp -= STACK_FRAME_OVERHEAD;

	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork.  The new task will
	 * do some house keeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
	((unsigned long *)sp)[0] = 0;
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
	p->thread.ksp = sp;
#ifdef CONFIG_PPC32
	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
				_ALIGN_UP(sizeof(struct thread_info), 16);
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	p->thread.ptrace_bps[0] = NULL;
#endif

	p->thread.fp_save_area = NULL;
#ifdef CONFIG_ALTIVEC
	p->thread.vr_save_area = NULL;
#endif

	setup_ksp_vsid(p, sp);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		p->thread.dscr_inherit = current->thread.dscr_inherit;
		p->thread.dscr = mfspr(SPRN_DSCR);
	}
	if (cpu_has_feature(CPU_FTR_HAS_PPR))
		p->thread.ppr = INIT_PPR;
#endif
	kregs->nip = ppc_function_entry(f);
	return 0;
}
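
/*
 * Editorial sketch of the child stack built above (top of the stack
 * page downwards), under the assumption of the code as written:
 *
 *	+ THREAD_SIZE ----------------+
 *	| struct pt_regs (childregs)  |  user regs (or kthread frame)
 *	| STACK_FRAME_OVERHEAD        |  dummy frame, back chain = 0
 *	| struct pt_regs (kregs)      |  kregs->nip = ret_from_fork /
 *	| STACK_FRAME_OVERHEAD        |    ret_from_kernel_thread
 *	+--- p->thread.ksp -----------+
 *
 * _switch() pops this synthetic frame the first time the new task runs.
 */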

/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */
#endif

	/*
	 * If we exec out of a kernel thread then thread.regs will not be
	 * set.  Do it now.
	 */
	if (!current->thread.regs) {
		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
		current->thread.regs = regs - 1;
	}

	memset(regs->gpr, 0, sizeof(regs->gpr));
	regs->ctr = 0;
	regs->link = 0;
	regs->xer = 0;
	regs->ccr = 0;
	regs->gpr[1] = sp;

	/*
	 * We have just cleared all the nonvolatile GPRs, so make
	 * FULL_REGS(regs) return true.  This is necessary to allow
	 * ptrace to examine the thread immediately after exec.
	 */
	regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
	regs->mq = 0;
	regs->nip = start;
	regs->msr = MSR_USER;
#else
	if (!is_32bit_task()) {
		unsigned long entry;

		if (is_elf2_task()) {
			/* Look ma, no function descriptors! */
			entry = start;

			/*
			 * Ulrich says:
			 *   The latest iteration of the ABI requires that when
			 *   calling a function (at its global entry point),
			 *   the caller must ensure r12 holds the entry point
			 *   address (so that the function can quickly
			 *   establish addressability).
			 */
			regs->gpr[12] = start;
			/* Make sure that's restored on entry to userspace. */
			set_thread_flag(TIF_RESTOREALL);
		} else {
			unsigned long toc;

			/*
			 * start is a relocated pointer to the function
			 * descriptor for the elf _start routine.  The first
			 * entry in the function descriptor is the entry
			 * address of _start and the second entry is the TOC
			 * value we need to use.
			 */
			__get_user(entry, (unsigned long __user *)start);
			__get_user(toc, (unsigned long __user *)start+1);

			/*
			 * Check whether the e_entry function descriptor entries
			 * need to be relocated before we can use them.
			 */
			if (load_addr != 0) {
				entry += load_addr;
				toc += load_addr;
			}
			regs->gpr[2] = toc;
		}
		regs->nip = entry;
		regs->msr = MSR_USER64;
	} else {
		regs->nip = start;
		regs->gpr[2] = 0;
		regs->msr = MSR_USER32;
	}
#endif
#ifdef CONFIG_VSX
	current->thread.used_vsr = 0;
#endif
	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
	current->thread.fp_save_area = NULL;
#ifdef CONFIG_ALTIVEC
	memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
	current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
	current->thread.vr_save_area = NULL;
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (cpu_has_feature(CPU_FTR_TM))
		regs->msr |= MSR_TM;
	current->thread.tm_tfhar = 0;
	current->thread.tm_texasr = 0;
	current->thread.tm_tfiar = 0;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
}
EXPORT_SYMBOL(start_thread);

#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
		| PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/*
	 * This is a bit hairy.  If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
	 * mode (async, precise, disabled) for 'Classic' FP.
	 */
	if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			/*
			 * When the sticky exception bits are set
			 * directly by userspace, it must call prctl
			 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
			 * in the existing prctl settings) or
			 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
			 * the bits being set).  <fenv.h> functions
			 * saving and restoring the whole
			 * floating-point environment need to do so
			 * anyway to restore the prctl settings from
			 * the saved environment.
			 */
			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
			tsk->thread.fpexc_mode = val &
				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
			return 0;
		} else {
			return -EINVAL;
		}
#else
		return -EINVAL;
#endif
	}

	/*
	 * On a CONFIG_SPE this does not hurt us.  The bits that
	 * __pack_fe01 use do not overlap with bits used for
	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
	 * on CONFIG_SPE implementations are reserved so writing to
	 * them does not change anything.
	 */
	if (val > PR_FP_EXC_PRECISE)
		return -EINVAL;
	tsk->thread.fpexc_mode = __pack_fe01(val);
	if (regs != NULL && (regs->msr & MSR_FP) != 0)
		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
			| tsk->thread.fpexc_mode;
	return 0;
}
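
/*
 * Usage sketch (editorial, based on the generic prctl(2) interface
 * rather than this file): userspace selects the FP exception mode with
 *
 *	#include <sys/prctl.h>
 *	prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);		// precise trapping
 *	prctl(PR_SET_FPEXC, PR_FP_EXC_DISABLED);	// no FP traps
 *
 * which reaches set_fpexc_mode() above; get_fpexc_mode() below serves
 * the matching PR_GET_FPEXC query.
 */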

int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			/*
			 * When the sticky exception bits are set
			 * directly by userspace, it must call prctl
			 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
			 * in the existing prctl settings) or
			 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
			 * the bits being set).  <fenv.h> functions
			 * saving and restoring the whole
			 * floating-point environment need to do so
			 * anyway to restore the prctl settings from
			 * the saved environment.
			 */
			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
			val = tsk->thread.fpexc_mode;
		} else
			return -EINVAL;
#else
		return -EINVAL;
#endif
	else
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	return put_user(val, (unsigned int __user *) adr);
}

int set_endian(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (val == PR_ENDIAN_BIG)
		regs->msr &= ~MSR_LE;
	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
		regs->msr |= MSR_LE;
	else
		return -EINVAL;

	return 0;
}

int get_endian(struct task_struct *tsk, unsigned long adr)
{
	struct pt_regs *regs = tsk->thread.regs;
	unsigned int val;

	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
	    !cpu_has_feature(CPU_FTR_REAL_LE))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (regs->msr & MSR_LE) {
		if (cpu_has_feature(CPU_FTR_REAL_LE))
			val = PR_ENDIAN_LITTLE;
		else
			val = PR_ENDIAN_PPC_LITTLE;
	} else
		val = PR_ENDIAN_BIG;

	return put_user(val, (unsigned int __user *)adr);
}

int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	tsk->thread.align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}

static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
				  unsigned long nbytes)
{
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	/*
	 * Avoid crashing if the stack has overflowed and corrupted
	 * task_cpu(p), which is in the thread_info struct.
	 */
	if (cpu < NR_CPUS && cpu_possible(cpu)) {
		stack_page = (unsigned long) hardirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;

		stack_page = (unsigned long) softirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;
	}
	return 0;
}

int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)task_stack_page(p);

	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return valid_irq_stack(sp, p, nbytes);
}

EXPORT_SYMBOL(validate_sp);

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
		return 0;

	do {
		sp = *(unsigned long *)sp;
		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
			return 0;
		if (count > 0) {
			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}

static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	unsigned long sp, ip, lr, newsp;
	int count = 0;
	int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int curr_frame = current->curr_ret_stack;
	extern void return_to_handler(void);
	unsigned long rth = (unsigned long)return_to_handler;
#endif

	sp = (unsigned long) stack;
	if (tsk == NULL)
		tsk = current;
	if (sp == 0) {
		if (tsk == current)
			sp = current_stack_pointer();
		else
			sp = tsk->thread.ksp;
	}

	lr = 0;
	printk("Call Trace:\n");
	do {
		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

		stack = (unsigned long *) sp;
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe || ip != lr) {
			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
			if ((ip == rth) && curr_frame >= 0) {
				printk(" (%pS)",
				       (void *)current->ret_stack[curr_frame].ret);
				curr_frame--;
			}
#endif
			if (firstframe)
				printk(" (unreliable)");
			printk("\n");
		}
		firstframe = 0;

		/*
		 * See if this is an exception frame.
		 * We look for the "regshere" marker in the current frame.
		 */
		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			struct pt_regs *regs = (struct pt_regs *)
				(sp + STACK_FRAME_OVERHEAD);
			lr = regs->link;
			printk("--- interrupt: %lx at %pS\n    LR = %pS\n",
			       regs->trap, (void *)regs->nip, (void *)lr);
			firstframe = 1;
		}

		sp = newsp;
	} while (count++ < kstack_depth_to_print);
}

#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
void notrace __ppc64_runlatch_on(void)
{
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl |= CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);

	ti->local_flags |= _TLF_RUNLATCH;
}

/* Called with hard IRQs off */
void notrace __ppc64_runlatch_off(void)
{
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ti->local_flags &= ~_TLF_RUNLATCH;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl &= ~CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);
}
#endif /* CONFIG_PPC64 */

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}
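
/*
 * Worked example (editorial note): with 4K pages,
 * get_random_int() & ~PAGE_MASK yields a random offset in [0, 4095];
 * the final "& ~0xf" then rounds the stack pointer down to the 16-byte
 * alignment the ABI requires.
 */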

static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = 0;

	/* 8MB for 32bit, 1GB for 64bit */
	if (is_32bit_task())
		rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
	else
		rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));

	return rnd << PAGE_SHIFT;
}
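
/*
 * Worked example (editorial note): for a 32-bit task with 4K pages
 * (PAGE_SHIFT = 12), rnd is a page count in [0, 2^11), so the returned
 * offset spans [0, 8MB) in page-sized steps; 64-bit tasks get [0, 1GB).
 */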

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

#ifdef CONFIG_PPC_STD_MMU_64
	/*
	 * If we are using 1TB segments and we are allowed to randomise
	 * the heap, we can put it above 1TB so it is backed by a 1TB
	 * segment.  Otherwise the heap will be in the bottom 1TB
	 * which always uses 256MB segments and this may result in a
	 * performance penalty.
	 */
	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

	ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;

	return ret;
}