powerpc: Prepare for splitting giveup_{fpu, altivec, vsx} in two
arch/powerpc/kernel/process.c (linux-2.6-block.git)
14cf11af 1/*
14cf11af
PM
2 * Derived from "arch/i386/kernel/process.c"
3 * Copyright (C) 1995 Linus Torvalds
4 *
5 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
6 * Paul Mackerras (paulus@cs.anu.edu.au)
7 *
8 * PowerPC version
9 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 */
16
14cf11af
PM
17#include <linux/errno.h>
18#include <linux/sched.h>
19#include <linux/kernel.h>
20#include <linux/mm.h>
21#include <linux/smp.h>
14cf11af
PM
22#include <linux/stddef.h>
23#include <linux/unistd.h>
24#include <linux/ptrace.h>
25#include <linux/slab.h>
26#include <linux/user.h>
27#include <linux/elf.h>
14cf11af
PM
28#include <linux/prctl.h>
29#include <linux/init_task.h>
4b16f8e2 30#include <linux/export.h>
14cf11af
PM
31#include <linux/kallsyms.h>
32#include <linux/mqueue.h>
33#include <linux/hardirq.h>
06d67d54 34#include <linux/utsname.h>
6794c782 35#include <linux/ftrace.h>
79741dd3 36#include <linux/kernel_stat.h>
d839088c
AB
37#include <linux/personality.h>
38#include <linux/random.h>
5aae8a53 39#include <linux/hw_breakpoint.h>
7b051f66 40#include <linux/uaccess.h>
14cf11af
PM
41
42#include <asm/pgtable.h>
14cf11af
PM
43#include <asm/io.h>
44#include <asm/processor.h>
45#include <asm/mmu.h>
46#include <asm/prom.h>
76032de8 47#include <asm/machdep.h>
c6622f63 48#include <asm/time.h>
ae3a197e 49#include <asm/runlatch.h>
a7f31841 50#include <asm/syscalls.h>
ae3a197e 51#include <asm/switch_to.h>
fb09692e 52#include <asm/tm.h>
ae3a197e 53#include <asm/debug.h>
06d67d54
PM
54#ifdef CONFIG_PPC64
55#include <asm/firmware.h>
06d67d54 56#endif
7cedd601 57#include <asm/code-patching.h>
d6a61bfc
LM
58#include <linux/kprobes.h>
59#include <linux/kdebug.h>
14cf11af 60
8b3c34cf
MN
61/* Transactional Memory debug */
62#ifdef TM_DEBUG_SW
63#define TM_DEBUG(x...) printk(KERN_INFO x)
64#else
65#define TM_DEBUG(x...) do { } while(0)
66#endif
67
14cf11af
PM
68extern unsigned long _get_SP(void);
69
d31626f7 70#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
b86fd2bd 71static void check_if_tm_restore_required(struct task_struct *tsk)
d31626f7
PM
72{
73 /*
74 * If we are saving the current thread's registers, and the
75 * thread is in a transactional state, set the TIF_RESTORE_TM
76 * bit so that we know to restore the registers before
77 * returning to userspace.
78 */
79 if (tsk == current && tsk->thread.regs &&
80 MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
81 !test_thread_flag(TIF_RESTORE_TM)) {
829023df 82 tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
d31626f7
PM
83 set_thread_flag(TIF_RESTORE_TM);
84 }
d31626f7 85}
d31626f7 86#else
b86fd2bd 87static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
d31626f7
PM
88#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
89
3eb5d588
AB
90bool strict_msr_control;
91EXPORT_SYMBOL(strict_msr_control);
92
93static int __init enable_strict_msr_control(char *str)
94{
95 strict_msr_control = true;
96 pr_info("Enabling strict facility control\n");
97
98 return 0;
99}
100early_param("ppc_strict_facility_enable", enable_strict_msr_control);
101
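/*
 * Set the requested facility bits in the kernel's MSR so that the
 * corresponding register state can be touched; MSR_VSX is raised
 * alongside MSR_FP on VSX capable CPUs.  Callers pair this with
 * msr_check_and_clear() once they are done with the registers.
 */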
102void msr_check_and_set(unsigned long bits)
98da581e 103{
a0e72cf1
AB
104 unsigned long oldmsr = mfmsr();
105 unsigned long newmsr;
98da581e 106
a0e72cf1 107 newmsr = oldmsr | bits;
98da581e 108
98da581e 109#ifdef CONFIG_VSX
a0e72cf1 110 if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
98da581e
AB
111 newmsr |= MSR_VSX;
112#endif
a0e72cf1 113
98da581e
AB
114 if (oldmsr != newmsr)
115 mtmsr_isync(newmsr);
a0e72cf1 116}
98da581e 117
3eb5d588 118void __msr_check_and_clear(unsigned long bits)
a0e72cf1
AB
119{
120 unsigned long oldmsr = mfmsr();
121 unsigned long newmsr;
122
123 newmsr = oldmsr & ~bits;
124
125#ifdef CONFIG_VSX
126 if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
127 newmsr &= ~MSR_VSX;
128#endif
129
130 if (oldmsr != newmsr)
131 mtmsr_isync(newmsr);
132}
3eb5d588 133EXPORT_SYMBOL(__msr_check_and_clear);
a0e72cf1
AB
134
135#ifdef CONFIG_PPC_FPU
136void giveup_fpu(struct task_struct *tsk)
137{
138 check_if_tm_restore_required(tsk);
139
140 msr_check_and_set(MSR_FP);
98da581e 141 __giveup_fpu(tsk);
a0e72cf1 142 msr_check_and_clear(MSR_FP);
98da581e
AB
143}
144EXPORT_SYMBOL(giveup_fpu);
145
14cf11af
PM
146/*
147 * Make sure the floating-point register state in the
148 * thread_struct is up to date for task tsk.
149 */
150void flush_fp_to_thread(struct task_struct *tsk)
151{
152 if (tsk->thread.regs) {
153 /*
154 * We need to disable preemption here because if we didn't,
155 * another process could get scheduled after the regs->msr
156 * test but before we have finished saving the FP registers
157 * to the thread_struct. That process could take over the
158 * FPU, and then when we get scheduled again we would store
159 * bogus values for the remaining FP registers.
160 */
161 preempt_disable();
162 if (tsk->thread.regs->msr & MSR_FP) {
14cf11af
PM
163 /*
164 * This should only ever be called for current or
165 * for a stopped child process. Since we save away
af1bbc3d 166 * the FP register state on context switch,
14cf11af
PM
167 * there is something wrong if a stopped child appears
168 * to still have its FP state in the CPU registers.
169 */
170 BUG_ON(tsk != current);
b86fd2bd 171 giveup_fpu(tsk);
14cf11af
PM
172 }
173 preempt_enable();
174 }
175}
de56a948 176EXPORT_SYMBOL_GPL(flush_fp_to_thread);
14cf11af
PM
177
178void enable_kernel_fp(void)
179{
180 WARN_ON(preemptible());
181
a0e72cf1 182 msr_check_and_set(MSR_FP);
611b0e5c 183
d64d02ce
AB
184 if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
185 check_if_tm_restore_required(current);
a0e72cf1 186 __giveup_fpu(current);
d64d02ce 187 }
14cf11af
PM
188}
189EXPORT_SYMBOL(enable_kernel_fp);
70fe3d98
CB
190
191static int restore_fp(struct task_struct *tsk) {
192 if (tsk->thread.load_fp) {
193 load_fp_state(&current->thread.fp_state);
194 current->thread.load_fp++;
195 return 1;
196 }
197 return 0;
198}
199#else
200static int restore_fp(struct task_struct *tsk) { return 0; }
d1e1cf2e 201#endif /* CONFIG_PPC_FPU */
14cf11af 202
14cf11af 203#ifdef CONFIG_ALTIVEC
70fe3d98
CB
204#define loadvec(thr) ((thr).load_vec)
205
98da581e
AB
206void giveup_altivec(struct task_struct *tsk)
207{
98da581e
AB
208 check_if_tm_restore_required(tsk);
209
a0e72cf1 210 msr_check_and_set(MSR_VEC);
98da581e 211 __giveup_altivec(tsk);
a0e72cf1 212 msr_check_and_clear(MSR_VEC);
98da581e
AB
213}
214EXPORT_SYMBOL(giveup_altivec);
215
14cf11af
PM
216void enable_kernel_altivec(void)
217{
218 WARN_ON(preemptible());
219
a0e72cf1 220 msr_check_and_set(MSR_VEC);
611b0e5c 221
d64d02ce
AB
222 if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
223 check_if_tm_restore_required(current);
a0e72cf1 224 __giveup_altivec(current);
d64d02ce 225 }
14cf11af
PM
226}
227EXPORT_SYMBOL(enable_kernel_altivec);
228
229/*
230 * Make sure the VMX/Altivec register state in the
231 * thread_struct is up to date for task tsk.
232 */
233void flush_altivec_to_thread(struct task_struct *tsk)
234{
235 if (tsk->thread.regs) {
236 preempt_disable();
237 if (tsk->thread.regs->msr & MSR_VEC) {
14cf11af 238 BUG_ON(tsk != current);
b86fd2bd 239 giveup_altivec(tsk);
14cf11af
PM
240 }
241 preempt_enable();
242 }
243}
de56a948 244EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
70fe3d98
CB
245
246static int restore_altivec(struct task_struct *tsk)
247{
248 if (cpu_has_feature(CPU_FTR_ALTIVEC) && tsk->thread.load_vec) {
249 load_vr_state(&tsk->thread.vr_state);
250 tsk->thread.used_vr = 1;
251 tsk->thread.load_vec++;
252
253 return 1;
254 }
255 return 0;
256}
257#else
258#define loadvec(thr) 0
259static inline int restore_altivec(struct task_struct *tsk) { return 0; }
14cf11af
PM
260#endif /* CONFIG_ALTIVEC */
261
ce48b210 262#ifdef CONFIG_VSX
a7d623d4
AB
263void giveup_vsx(struct task_struct *tsk)
264{
a7d623d4
AB
265 check_if_tm_restore_required(tsk);
266
a0e72cf1 267 msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
a7d623d4
AB
268 if (tsk->thread.regs->msr & MSR_FP)
269 __giveup_fpu(tsk);
270 if (tsk->thread.regs->msr & MSR_VEC)
271 __giveup_altivec(tsk);
272 __giveup_vsx(tsk);
a0e72cf1 273 msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
a7d623d4
AB
274}
275EXPORT_SYMBOL(giveup_vsx);
276
ce48b210
MN
277void enable_kernel_vsx(void)
278{
279 WARN_ON(preemptible());
280
a0e72cf1 281 msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
611b0e5c 282
a0e72cf1 283 if (current->thread.regs && (current->thread.regs->msr & MSR_VSX)) {
d64d02ce 284 check_if_tm_restore_required(current);
a0e72cf1
AB
285 if (current->thread.regs->msr & MSR_FP)
286 __giveup_fpu(current);
287 if (current->thread.regs->msr & MSR_VEC)
288 __giveup_altivec(current);
289 __giveup_vsx(current);
611b0e5c 290 }
ce48b210
MN
291}
292EXPORT_SYMBOL(enable_kernel_vsx);
ce48b210
MN
293
294void flush_vsx_to_thread(struct task_struct *tsk)
295{
296 if (tsk->thread.regs) {
297 preempt_disable();
298 if (tsk->thread.regs->msr & MSR_VSX) {
ce48b210 299 BUG_ON(tsk != current);
ce48b210
MN
300 giveup_vsx(tsk);
301 }
302 preempt_enable();
303 }
304}
de56a948 305EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
70fe3d98
CB
306
307static int restore_vsx(struct task_struct *tsk)
308{
309 if (cpu_has_feature(CPU_FTR_VSX)) {
310 tsk->thread.used_vsr = 1;
311 return 1;
312 }
313
314 return 0;
315}
316#else
317static inline int restore_vsx(struct task_struct *tsk) { return 0; }
ce48b210
MN
318#endif /* CONFIG_VSX */
319
14cf11af 320#ifdef CONFIG_SPE
98da581e
AB
321void giveup_spe(struct task_struct *tsk)
322{
98da581e
AB
323 check_if_tm_restore_required(tsk);
324
a0e72cf1 325 msr_check_and_set(MSR_SPE);
98da581e 326 __giveup_spe(tsk);
a0e72cf1 327 msr_check_and_clear(MSR_SPE);
98da581e
AB
328}
329EXPORT_SYMBOL(giveup_spe);
14cf11af
PM
330
331void enable_kernel_spe(void)
332{
333 WARN_ON(preemptible());
334
a0e72cf1 335 msr_check_and_set(MSR_SPE);
611b0e5c 336
d64d02ce
AB
337 if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
338 check_if_tm_restore_required(current);
a0e72cf1 339 __giveup_spe(current);
d64d02ce 340 }
14cf11af
PM
341}
342EXPORT_SYMBOL(enable_kernel_spe);
343
344void flush_spe_to_thread(struct task_struct *tsk)
345{
346 if (tsk->thread.regs) {
347 preempt_disable();
348 if (tsk->thread.regs->msr & MSR_SPE) {
14cf11af 349 BUG_ON(tsk != current);
685659ee 350 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
0ee6c15e 351 giveup_spe(tsk);
14cf11af
PM
352 }
353 preempt_enable();
354 }
355}
14cf11af
PM
356#endif /* CONFIG_SPE */
357
c2085059
AB
358static unsigned long msr_all_available;
359
360static int __init init_msr_all_available(void)
361{
362#ifdef CONFIG_PPC_FPU
363 msr_all_available |= MSR_FP;
364#endif
365#ifdef CONFIG_ALTIVEC
366 if (cpu_has_feature(CPU_FTR_ALTIVEC))
367 msr_all_available |= MSR_VEC;
368#endif
369#ifdef CONFIG_VSX
370 if (cpu_has_feature(CPU_FTR_VSX))
371 msr_all_available |= MSR_VSX;
372#endif
373#ifdef CONFIG_SPE
374 if (cpu_has_feature(CPU_FTR_SPE))
375 msr_all_available |= MSR_SPE;
376#endif
377
378 return 0;
379}
380early_initcall(init_msr_all_available);
381
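/*
 * Give up every facility the task is currently using in one pass:
 * compare the task's user MSR against msr_all_available and save
 * FP/VMX/VSX/SPE state back into the thread_struct as needed.
 */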
382void giveup_all(struct task_struct *tsk)
383{
384 unsigned long usermsr;
385
386 if (!tsk->thread.regs)
387 return;
388
389 usermsr = tsk->thread.regs->msr;
390
391 if ((usermsr & msr_all_available) == 0)
392 return;
393
394 msr_check_and_set(msr_all_available);
395
396#ifdef CONFIG_PPC_FPU
397 if (usermsr & MSR_FP)
398 __giveup_fpu(tsk);
399#endif
400#ifdef CONFIG_ALTIVEC
401 if (usermsr & MSR_VEC)
402 __giveup_altivec(tsk);
403#endif
404#ifdef CONFIG_VSX
405 if (usermsr & MSR_VSX)
406 __giveup_vsx(tsk);
407#endif
408#ifdef CONFIG_SPE
409 if (usermsr & MSR_SPE)
410 __giveup_spe(tsk);
411#endif
412
413 msr_check_and_clear(msr_all_available);
414}
415EXPORT_SYMBOL(giveup_all);
416
70fe3d98
CB
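/*
 * Reload FP/VMX/VSX register state for the current thread if it has
 * been used recently (load_fp/load_vec non-zero) and the corresponding
 * bits in regs->msr are not already set; the reloaded facilities are
 * then reflected back into regs->msr.
 */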
417void restore_math(struct pt_regs *regs)
418{
419 unsigned long msr;
420
421 if (!current->thread.load_fp && !loadvec(current->thread))
422 return;
423
424 msr = regs->msr;
425 msr_check_and_set(msr_all_available);
426
427 /*
 428 * Only reload if the bit is not set in the user MSR; the bit being set
 429 * indicates that the registers are already hot.
430 */
431 if ((!(msr & MSR_FP)) && restore_fp(current))
432 msr |= MSR_FP | current->thread.fpexc_mode;
433
434 if ((!(msr & MSR_VEC)) && restore_altivec(current))
435 msr |= MSR_VEC;
436
437 if ((msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC) &&
438 restore_vsx(current)) {
439 msr |= MSR_VSX;
440 }
441
442 msr_check_and_clear(msr_all_available);
443
444 regs->msr = msr;
445}
446
de2a20aa
CB
447void save_all(struct task_struct *tsk)
448{
449 unsigned long usermsr;
450
451 if (!tsk->thread.regs)
452 return;
453
454 usermsr = tsk->thread.regs->msr;
455
456 if ((usermsr & msr_all_available) == 0)
457 return;
458
459 msr_check_and_set(msr_all_available);
460
461 if (usermsr & MSR_FP)
462 __giveup_fpu(tsk);
463
464 if (usermsr & MSR_VEC)
465 __giveup_altivec(tsk);
466
467 if (usermsr & MSR_VSX)
468 __giveup_vsx(tsk);
469
470 if (usermsr & MSR_SPE)
471 __giveup_spe(tsk);
472
473 msr_check_and_clear(msr_all_available);
474}
475
579e633e
AB
476void flush_all_to_thread(struct task_struct *tsk)
477{
478 if (tsk->thread.regs) {
479 preempt_disable();
480 BUG_ON(tsk != current);
de2a20aa 481 save_all(tsk);
579e633e
AB
482
483#ifdef CONFIG_SPE
484 if (tsk->thread.regs->msr & MSR_SPE)
485 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
486#endif
487
488 preempt_enable();
489 }
490}
491EXPORT_SYMBOL(flush_all_to_thread);
492
3bffb652
DK
493#ifdef CONFIG_PPC_ADV_DEBUG_REGS
494void do_send_trap(struct pt_regs *regs, unsigned long address,
495 unsigned long error_code, int signal_code, int breakpt)
496{
497 siginfo_t info;
498
41ab5266 499 current->thread.trap_nr = signal_code;
3bffb652
DK
500 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
501 11, SIGSEGV) == NOTIFY_STOP)
502 return;
503
504 /* Deliver the signal to userspace */
505 info.si_signo = SIGTRAP;
506 info.si_errno = breakpt; /* breakpoint or watchpoint id */
507 info.si_code = signal_code;
508 info.si_addr = (void __user *)address;
509 force_sig_info(SIGTRAP, &info, current);
510}
511#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
9422de3e 512void do_break (struct pt_regs *regs, unsigned long address,
d6a61bfc
LM
513 unsigned long error_code)
514{
515 siginfo_t info;
516
41ab5266 517 current->thread.trap_nr = TRAP_HWBKPT;
d6a61bfc
LM
518 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
519 11, SIGSEGV) == NOTIFY_STOP)
520 return;
521
9422de3e 522 if (debugger_break_match(regs))
d6a61bfc
LM
523 return;
524
9422de3e
MN
525 /* Clear the breakpoint */
526 hw_breakpoint_disable();
d6a61bfc
LM
527
528 /* Deliver the signal to userspace */
529 info.si_signo = SIGTRAP;
530 info.si_errno = 0;
531 info.si_code = TRAP_HWBKPT;
532 info.si_addr = (void __user *)address;
533 force_sig_info(SIGTRAP, &info, current);
534}
3bffb652 535#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
d6a61bfc 536
9422de3e 537static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);
a2ceff5e 538
3bffb652
DK
539#ifdef CONFIG_PPC_ADV_DEBUG_REGS
540/*
541 * Set the debug registers back to their default "safe" values.
542 */
543static void set_debug_reg_defaults(struct thread_struct *thread)
544{
51ae8d4a 545 thread->debug.iac1 = thread->debug.iac2 = 0;
3bffb652 546#if CONFIG_PPC_ADV_DEBUG_IACS > 2
51ae8d4a 547 thread->debug.iac3 = thread->debug.iac4 = 0;
3bffb652 548#endif
51ae8d4a 549 thread->debug.dac1 = thread->debug.dac2 = 0;
3bffb652 550#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
51ae8d4a 551 thread->debug.dvc1 = thread->debug.dvc2 = 0;
3bffb652 552#endif
51ae8d4a 553 thread->debug.dbcr0 = 0;
3bffb652
DK
554#ifdef CONFIG_BOOKE
555 /*
556 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
557 */
51ae8d4a 558 thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
3bffb652
DK
559 DBCR1_IAC3US | DBCR1_IAC4US;
560 /*
561 * Force Data Address Compare User/Supervisor bits to be User-only
562 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
563 */
51ae8d4a 564 thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
3bffb652 565#else
51ae8d4a 566 thread->debug.dbcr1 = 0;
3bffb652
DK
567#endif
568}
569
f5f97210 570static void prime_debug_regs(struct debug_reg *debug)
3bffb652 571{
6cecf76b
SW
572 /*
573 * We could have inherited MSR_DE from userspace, since
574 * it doesn't get cleared on exception entry. Make sure
575 * MSR_DE is clear before we enable any debug events.
576 */
577 mtmsr(mfmsr() & ~MSR_DE);
578
f5f97210
SW
579 mtspr(SPRN_IAC1, debug->iac1);
580 mtspr(SPRN_IAC2, debug->iac2);
3bffb652 581#if CONFIG_PPC_ADV_DEBUG_IACS > 2
f5f97210
SW
582 mtspr(SPRN_IAC3, debug->iac3);
583 mtspr(SPRN_IAC4, debug->iac4);
3bffb652 584#endif
f5f97210
SW
585 mtspr(SPRN_DAC1, debug->dac1);
586 mtspr(SPRN_DAC2, debug->dac2);
3bffb652 587#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
f5f97210
SW
588 mtspr(SPRN_DVC1, debug->dvc1);
589 mtspr(SPRN_DVC2, debug->dvc2);
3bffb652 590#endif
f5f97210
SW
591 mtspr(SPRN_DBCR0, debug->dbcr0);
592 mtspr(SPRN_DBCR1, debug->dbcr1);
3bffb652 593#ifdef CONFIG_BOOKE
f5f97210 594 mtspr(SPRN_DBCR2, debug->dbcr2);
3bffb652
DK
595#endif
596}
597/*
598 * Unless neither the old or new thread are making use of the
599 * debug registers, set the debug registers from the values
600 * stored in the new thread.
601 */
f5f97210 602void switch_booke_debug_regs(struct debug_reg *new_debug)
3bffb652 603{
51ae8d4a 604 if ((current->thread.debug.dbcr0 & DBCR0_IDM)
f5f97210
SW
605 || (new_debug->dbcr0 & DBCR0_IDM))
606 prime_debug_regs(new_debug);
3bffb652 607}
3743c9b8 608EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
3bffb652 609#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
e0780b72 610#ifndef CONFIG_HAVE_HW_BREAKPOINT
3bffb652
DK
611static void set_debug_reg_defaults(struct thread_struct *thread)
612{
9422de3e
MN
613 thread->hw_brk.address = 0;
614 thread->hw_brk.type = 0;
b9818c33 615 set_breakpoint(&thread->hw_brk);
3bffb652 616}
e0780b72 617#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
3bffb652
DK
618#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
619
172ae2e7 620#ifdef CONFIG_PPC_ADV_DEBUG_REGS
9422de3e
MN
621static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
622{
d6a61bfc 623 mtspr(SPRN_DAC1, dabr);
221c185d
DK
624#ifdef CONFIG_PPC_47x
625 isync();
626#endif
9422de3e
MN
627 return 0;
628}
c6c9eace 629#elif defined(CONFIG_PPC_BOOK3S)
9422de3e
MN
630static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
631{
c6c9eace 632 mtspr(SPRN_DABR, dabr);
82a9f16a
MN
633 if (cpu_has_feature(CPU_FTR_DABRX))
634 mtspr(SPRN_DABRX, dabrx);
cab0af98 635 return 0;
14cf11af 636}
9422de3e
MN
637#else
638static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
639{
640 return -EINVAL;
641}
642#endif
643
644static inline int set_dabr(struct arch_hw_breakpoint *brk)
645{
646 unsigned long dabr, dabrx;
647
648 dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
649 dabrx = ((brk->type >> 3) & 0x7);
650
651 if (ppc_md.set_dabr)
652 return ppc_md.set_dabr(dabr, dabrx);
653
654 return __set_dabr(dabr, dabrx);
655}
656
bf99de36
MN
657static inline int set_dawr(struct arch_hw_breakpoint *brk)
658{
05d694ea 659 unsigned long dawr, dawrx, mrd;
bf99de36
MN
660
661 dawr = brk->address;
662
 663 dawrx = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE))
 664 << (63 - 58); /* read/write bits */
 665 dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2)
 666 << (63 - 59); /* translate */
 667 dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL))
 668 >> 3; /* privilege bits */
05d694ea
MN
 669 /*
 670  * The DAWR length is stored in field MDR, bits 48:53.  It matches the
 671  * range in doublewords (64 bits), biased by -1: e.g. 0b000000 = 1 DW
 672  * and 0b111111 = 64 DW.  brk->len is in bytes, so align it up to
 673  * doubleword size, shift and apply the bias.
 674  */
675 mrd = ((brk->len + 7) >> 3) - 1;
676 dawrx |= (mrd & 0x3f) << (63 - 53);
bf99de36
MN
677
678 if (ppc_md.set_dawr)
679 return ppc_md.set_dawr(dawr, dawrx);
680 mtspr(SPRN_DAWR, dawr);
681 mtspr(SPRN_DAWRX, dawrx);
682 return 0;
683}
684
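/*
 * Program the hardware breakpoint: remember it in the per-cpu
 * current_brk and write it to the DAWR on CPUs that have one
 * (CPU_FTR_DAWR), or to the legacy DABR otherwise.
 */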
21f58507 685void __set_breakpoint(struct arch_hw_breakpoint *brk)
9422de3e 686{
69111bac 687 memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));
9422de3e 688
bf99de36 689 if (cpu_has_feature(CPU_FTR_DAWR))
04c32a51
PG
690 set_dawr(brk);
691 else
692 set_dabr(brk);
9422de3e 693}
14cf11af 694
21f58507
PG
695void set_breakpoint(struct arch_hw_breakpoint *brk)
696{
697 preempt_disable();
698 __set_breakpoint(brk);
699 preempt_enable();
700}
701
06d67d54
PM
702#ifdef CONFIG_PPC64
703DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
06d67d54 704#endif
14cf11af 705
9422de3e
MN
706static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
707 struct arch_hw_breakpoint *b)
708{
709 if (a->address != b->address)
710 return false;
711 if (a->type != b->type)
712 return false;
713 if (a->len != b->len)
714 return false;
715 return true;
716}
d31626f7 717
fb09692e 718#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
d31626f7
PM
719static void tm_reclaim_thread(struct thread_struct *thr,
720 struct thread_info *ti, uint8_t cause)
721{
722 unsigned long msr_diff = 0;
723
724 /*
725 * If FP/VSX registers have been already saved to the
726 * thread_struct, move them to the transact_fp array.
727 * We clear the TIF_RESTORE_TM bit since after the reclaim
728 * the thread will no longer be transactional.
729 */
730 if (test_ti_thread_flag(ti, TIF_RESTORE_TM)) {
829023df 731 msr_diff = thr->ckpt_regs.msr & ~thr->regs->msr;
d31626f7
PM
732 if (msr_diff & MSR_FP)
733 memcpy(&thr->transact_fp, &thr->fp_state,
734 sizeof(struct thread_fp_state));
735 if (msr_diff & MSR_VEC)
736 memcpy(&thr->transact_vr, &thr->vr_state,
737 sizeof(struct thread_vr_state));
738 clear_ti_thread_flag(ti, TIF_RESTORE_TM);
739 msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1;
740 }
741
7f821fc9
MN
742 /*
743 * Use the current MSR TM suspended bit to track if we have
744 * checkpointed state outstanding.
745 * On signal delivery, we'd normally reclaim the checkpointed
 746 * state to obtain the stack pointer (see: get_tm_stackpointer()).
747 * This will then directly return to userspace without going
748 * through __switch_to(). However, if the stack frame is bad,
749 * we need to exit this thread which calls __switch_to() which
750 * will again attempt to reclaim the already saved tm state.
751 * Hence we need to check that we've not already reclaimed
752 * this state.
 753 * We do this using the current MSR, rather than tracking it in
 754 * some specific thread_struct bit, as it has the additional
 755 * benefit of checking for a potential TM bad thing exception.
756 */
757 if (!MSR_TM_SUSPENDED(mfmsr()))
758 return;
759
d31626f7
PM
760 tm_reclaim(thr, thr->regs->msr, cause);
761
762 /* Having done the reclaim, we now have the checkpointed
763 * FP/VSX values in the registers. These might be valid
764 * even if we have previously called enable_kernel_fp() or
765 * flush_fp_to_thread(), so update thr->regs->msr to
766 * indicate their current validity.
767 */
768 thr->regs->msr |= msr_diff;
769}
770
771void tm_reclaim_current(uint8_t cause)
772{
773 tm_enable();
774 tm_reclaim_thread(&current->thread, current_thread_info(), cause);
775}
776
fb09692e
MN
777static inline void tm_reclaim_task(struct task_struct *tsk)
778{
779 /* We have to work out if we're switching from/to a task that's in the
780 * middle of a transaction.
781 *
782 * In switching we need to maintain a 2nd register state as
783 * oldtask->thread.ckpt_regs. We tm_reclaim(oldproc); this saves the
784 * checkpointed (tbegin) state in ckpt_regs and saves the transactional
785 * (current) FPRs into oldtask->thread.transact_fpr[].
786 *
787 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
788 */
789 struct thread_struct *thr = &tsk->thread;
790
791 if (!thr->regs)
792 return;
793
794 if (!MSR_TM_ACTIVE(thr->regs->msr))
795 goto out_and_saveregs;
796
797 /* Stash the original thread MSR, as giveup_fpu et al will
798 * modify it. We hold onto it to see whether the task used
d31626f7 799 * FP & vector regs. If the TIF_RESTORE_TM flag is set,
829023df 800 * ckpt_regs.msr is already set.
fb09692e 801 */
d31626f7 802 if (!test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_TM))
829023df 803 thr->ckpt_regs.msr = thr->regs->msr;
fb09692e
MN
804
805 TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
806 "ccr=%lx, msr=%lx, trap=%lx)\n",
807 tsk->pid, thr->regs->nip,
808 thr->regs->ccr, thr->regs->msr,
809 thr->regs->trap);
810
d31626f7 811 tm_reclaim_thread(thr, task_thread_info(tsk), TM_CAUSE_RESCHED);
fb09692e
MN
812
813 TM_DEBUG("--- tm_reclaim on pid %d complete\n",
814 tsk->pid);
815
816out_and_saveregs:
817 /* Always save the regs here, even if a transaction's not active.
818 * This context-switches a thread's TM info SPRs. We do it here to
819 * be consistent with the restore path (in recheckpoint) which
820 * cannot happen later in _switch().
821 */
822 tm_save_sprs(thr);
823}
824
e6b8fd02
MN
825extern void __tm_recheckpoint(struct thread_struct *thread,
826 unsigned long orig_msr);
827
828void tm_recheckpoint(struct thread_struct *thread,
829 unsigned long orig_msr)
830{
831 unsigned long flags;
832
833 /* We really can't be interrupted here as the TEXASR registers can't
834 * change and later in the trecheckpoint code, we have a userspace R1.
835 * So let's hard disable over this region.
836 */
837 local_irq_save(flags);
838 hard_irq_disable();
839
840 /* The TM SPRs are restored here, so that TEXASR.FS can be set
841 * before the trecheckpoint and no explosion occurs.
842 */
843 tm_restore_sprs(thread);
844
845 __tm_recheckpoint(thread, orig_msr);
846
847 local_irq_restore(flags);
848}
849
bc2a9408 850static inline void tm_recheckpoint_new_task(struct task_struct *new)
fb09692e
MN
851{
852 unsigned long msr;
853
854 if (!cpu_has_feature(CPU_FTR_TM))
855 return;
856
857 /* Recheckpoint the registers of the thread we're about to switch to.
858 *
859 * If the task was using FP, we non-lazily reload both the original and
860 * the speculative FP register states. This is because the kernel
 861 * doesn't see if/when a TM rollback occurs, so if we take an FP
 862 * unavailable exception later, we are unable to determine which set of FP regs
863 * need to be restored.
864 */
865 if (!new->thread.regs)
866 return;
867
e6b8fd02
MN
868 if (!MSR_TM_ACTIVE(new->thread.regs->msr)){
869 tm_restore_sprs(&new->thread);
fb09692e 870 return;
e6b8fd02 871 }
829023df 872 msr = new->thread.ckpt_regs.msr;
fb09692e
MN
873 /* Recheckpoint to restore original checkpointed register state. */
874 TM_DEBUG("*** tm_recheckpoint of pid %d "
875 "(new->msr 0x%lx, new->origmsr 0x%lx)\n",
876 new->pid, new->thread.regs->msr, msr);
877
878 /* This loads the checkpointed FP/VEC state, if used */
879 tm_recheckpoint(&new->thread, msr);
880
881 /* This loads the speculative FP/VEC state, if used */
882 if (msr & MSR_FP) {
883 do_load_up_transact_fpu(&new->thread);
884 new->thread.regs->msr |=
885 (MSR_FP | new->thread.fpexc_mode);
886 }
f110c0c1 887#ifdef CONFIG_ALTIVEC
fb09692e
MN
888 if (msr & MSR_VEC) {
889 do_load_up_transact_altivec(&new->thread);
890 new->thread.regs->msr |= MSR_VEC;
891 }
f110c0c1 892#endif
fb09692e
MN
893 /* We may as well turn on VSX too since all the state is restored now */
894 if (msr & MSR_VSX)
895 new->thread.regs->msr |= MSR_VSX;
896
897 TM_DEBUG("*** tm_recheckpoint of pid %d complete "
898 "(kernel msr 0x%lx)\n",
899 new->pid, mfmsr());
900}
901
902static inline void __switch_to_tm(struct task_struct *prev)
903{
904 if (cpu_has_feature(CPU_FTR_TM)) {
905 tm_enable();
906 tm_reclaim_task(prev);
907 }
908}
d31626f7
PM
909
910/*
911 * This is called if we are on the way out to userspace and the
912 * TIF_RESTORE_TM flag is set. It checks if we need to reload
913 * FP and/or vector state and does so if necessary.
914 * If userspace is inside a transaction (whether active or
915 * suspended) and FP/VMX/VSX instructions have ever been enabled
916 * inside that transaction, then we have to keep them enabled
 917 * and keep the FP/VMX/VSX state loaded for as long as the transaction
918 * continues. The reason is that if we didn't, and subsequently
919 * got a FP/VMX/VSX unavailable interrupt inside a transaction,
920 * we don't know whether it's the same transaction, and thus we
921 * don't know which of the checkpointed state and the transactional
922 * state to use.
923 */
924void restore_tm_state(struct pt_regs *regs)
925{
926 unsigned long msr_diff;
927
928 clear_thread_flag(TIF_RESTORE_TM);
929 if (!MSR_TM_ACTIVE(regs->msr))
930 return;
931
829023df 932 msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
d31626f7 933 msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
70fe3d98
CB
934
935 restore_math(regs);
936
d31626f7
PM
937 regs->msr |= msr_diff;
938}
939
fb09692e
MN
940#else
941#define tm_recheckpoint_new_task(new)
942#define __switch_to_tm(prev)
943#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
9422de3e 944
152d523e
AB
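/*
 * save_sprs()/restore_sprs() context switch the per-thread SPRs
 * (VRSAVE, DSCR, FSCR, BESCR, EBBHR, EBBRR, TAR).  restore_sprs()
 * only writes an SPR when the old and new thread values differ,
 * avoiding unnecessary mtspr instructions.
 */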
945static inline void save_sprs(struct thread_struct *t)
946{
947#ifdef CONFIG_ALTIVEC
 948 if (cpu_has_feature(CPU_FTR_ALTIVEC))
949 t->vrsave = mfspr(SPRN_VRSAVE);
950#endif
951#ifdef CONFIG_PPC_BOOK3S_64
952 if (cpu_has_feature(CPU_FTR_DSCR))
953 t->dscr = mfspr(SPRN_DSCR);
954
955 if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
956 t->bescr = mfspr(SPRN_BESCR);
957 t->ebbhr = mfspr(SPRN_EBBHR);
958 t->ebbrr = mfspr(SPRN_EBBRR);
959
960 t->fscr = mfspr(SPRN_FSCR);
961
962 /*
963 * Note that the TAR is not available for use in the kernel.
964 * (To provide this, the TAR should be backed up/restored on
965 * exception entry/exit instead, and be in pt_regs. FIXME,
966 * this should be in pt_regs anyway (for debug).)
967 */
968 t->tar = mfspr(SPRN_TAR);
969 }
970#endif
971}
972
973static inline void restore_sprs(struct thread_struct *old_thread,
974 struct thread_struct *new_thread)
975{
976#ifdef CONFIG_ALTIVEC
977 if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
978 old_thread->vrsave != new_thread->vrsave)
979 mtspr(SPRN_VRSAVE, new_thread->vrsave);
980#endif
981#ifdef CONFIG_PPC_BOOK3S_64
982 if (cpu_has_feature(CPU_FTR_DSCR)) {
983 u64 dscr = get_paca()->dscr_default;
984 u64 fscr = old_thread->fscr & ~FSCR_DSCR;
985
986 if (new_thread->dscr_inherit) {
987 dscr = new_thread->dscr;
988 fscr |= FSCR_DSCR;
989 }
990
991 if (old_thread->dscr != dscr)
992 mtspr(SPRN_DSCR, dscr);
993
994 if (old_thread->fscr != fscr)
995 mtspr(SPRN_FSCR, fscr);
996 }
997
998 if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
999 if (old_thread->bescr != new_thread->bescr)
1000 mtspr(SPRN_BESCR, new_thread->bescr);
1001 if (old_thread->ebbhr != new_thread->ebbhr)
1002 mtspr(SPRN_EBBHR, new_thread->ebbhr);
1003 if (old_thread->ebbrr != new_thread->ebbrr)
1004 mtspr(SPRN_EBBRR, new_thread->ebbrr);
1005
1006 if (old_thread->tar != new_thread->tar)
1007 mtspr(SPRN_TAR, new_thread->tar);
1008 }
1009#endif
1010}
1011
14cf11af
PM
1012struct task_struct *__switch_to(struct task_struct *prev,
1013 struct task_struct *new)
1014{
1015 struct thread_struct *new_thread, *old_thread;
14cf11af 1016 struct task_struct *last;
d6bf29b4
PZ
1017#ifdef CONFIG_PPC_BOOK3S_64
1018 struct ppc64_tlb_batch *batch;
1019#endif
14cf11af 1020
152d523e
AB
1021 new_thread = &new->thread;
1022 old_thread = &current->thread;
1023
7ba5fef7
MN
1024 WARN_ON(!irqs_disabled());
1025
06d67d54
PM
1026#ifdef CONFIG_PPC64
1027 /*
1028 * Collect processor utilization data per process
1029 */
1030 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
69111bac 1031 struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
06d67d54
PM
1032 long unsigned start_tb, current_tb;
1033 start_tb = old_thread->start_tb;
1034 cu->current_tb = current_tb = mfspr(SPRN_PURR);
1035 old_thread->accum_tb += (current_tb - start_tb);
1036 new_thread->start_tb = current_tb;
1037 }
d6bf29b4
PZ
1038#endif /* CONFIG_PPC64 */
1039
1040#ifdef CONFIG_PPC_BOOK3S_64
69111bac 1041 batch = this_cpu_ptr(&ppc64_tlb_batch);
d6bf29b4
PZ
1042 if (batch->active) {
1043 current_thread_info()->local_flags |= _TLF_LAZY_MMU;
1044 if (batch->index)
1045 __flush_tlb_pending(batch);
1046 batch->active = 0;
1047 }
1048#endif /* CONFIG_PPC_BOOK3S_64 */
06d67d54 1049
f3d885cc
AB
1050#ifdef CONFIG_PPC_ADV_DEBUG_REGS
1051 switch_booke_debug_regs(&new->thread.debug);
1052#else
1053/*
1054 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
1055 * schedule DABR
1056 */
1057#ifndef CONFIG_HAVE_HW_BREAKPOINT
1058 if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
1059 __set_breakpoint(&new->thread.hw_brk);
1060#endif /* CONFIG_HAVE_HW_BREAKPOINT */
1061#endif
1062
1063 /*
1064 * We need to save SPRs before treclaim/trecheckpoint as these will
1065 * change a number of them.
1066 */
1067 save_sprs(&prev->thread);
1068
1069 __switch_to_tm(prev);
1070
1071 /* Save FPU, Altivec, VSX and SPE state */
1072 giveup_all(prev);
1073
44387e9f
AB
1074 /*
1075 * We can't take a PMU exception inside _switch() since there is a
1076 * window where the kernel stack SLB and the kernel stack are out
1077 * of sync. Hard disable here.
1078 */
1079 hard_irq_disable();
bc2a9408
MN
1080
1081 tm_recheckpoint_new_task(new);
1082
20dbe670
AB
1083 /*
1084 * Call restore_sprs() before calling _switch(). If we move it after
1085 * _switch() then we miss out on calling it for new tasks. The reason
1086 * for this is we manually create a stack frame for new tasks that
1087 * directly returns through ret_from_fork() or
1088 * ret_from_kernel_thread(). See copy_thread() for details.
1089 */
f3d885cc
AB
1090 restore_sprs(old_thread, new_thread);
1091
20dbe670
AB
1092 last = _switch(old_thread, new_thread);
1093
d6bf29b4
PZ
1094#ifdef CONFIG_PPC_BOOK3S_64
1095 if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
1096 current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
69111bac 1097 batch = this_cpu_ptr(&ppc64_tlb_batch);
d6bf29b4
PZ
1098 batch->active = 1;
1099 }
70fe3d98
CB
1100
1101 if (current_thread_info()->task->thread.regs)
1102 restore_math(current_thread_info()->task->thread.regs);
1103
d6bf29b4
PZ
1104#endif /* CONFIG_PPC_BOOK3S_64 */
1105
14cf11af
PM
1106 return last;
1107}
1108
06d67d54
PM
1109static int instructions_to_print = 16;
1110
06d67d54
PM
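/*
 * Dump instructions_to_print words of the instruction stream around
 * regs->nip (three quarters before it, one quarter after), marking the
 * faulting instruction with <...> and unreadable words with XXXXXXXX.
 */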
1111static void show_instructions(struct pt_regs *regs)
1112{
1113 int i;
1114 unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
1115 sizeof(int));
1116
1117 printk("Instruction dump:");
1118
1119 for (i = 0; i < instructions_to_print; i++) {
1120 int instr;
1121
1122 if (!(i % 8))
1123 printk("\n");
1124
0de2d820
SW
1125#if !defined(CONFIG_BOOKE)
1126 /* If executing with the IMMU off, adjust pc rather
1127 * than print XXXXXXXX.
1128 */
1129 if (!(regs->msr & MSR_IR))
1130 pc = (unsigned long)phys_to_virt(pc);
1131#endif
1132
00ae36de 1133 if (!__kernel_text_address(pc) ||
7b051f66 1134 probe_kernel_address((unsigned int __user *)pc, instr)) {
40c8cefa 1135 printk(KERN_CONT "XXXXXXXX ");
06d67d54
PM
1136 } else {
1137 if (regs->nip == pc)
40c8cefa 1138 printk(KERN_CONT "<%08x> ", instr);
06d67d54 1139 else
40c8cefa 1140 printk(KERN_CONT "%08x ", instr);
06d67d54
PM
1141 }
1142
1143 pc += sizeof(int);
1144 }
1145
1146 printk("\n");
1147}
1148
801c0b2c 1149struct regbit {
06d67d54
PM
1150 unsigned long bit;
1151 const char *name;
801c0b2c
MN
1152};
1153
1154static struct regbit msr_bits[] = {
3bfd0c9c
AB
1155#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
1156 {MSR_SF, "SF"},
1157 {MSR_HV, "HV"},
1158#endif
1159 {MSR_VEC, "VEC"},
1160 {MSR_VSX, "VSX"},
1161#ifdef CONFIG_BOOKE
1162 {MSR_CE, "CE"},
1163#endif
06d67d54
PM
1164 {MSR_EE, "EE"},
1165 {MSR_PR, "PR"},
1166 {MSR_FP, "FP"},
1167 {MSR_ME, "ME"},
3bfd0c9c 1168#ifdef CONFIG_BOOKE
1b98326b 1169 {MSR_DE, "DE"},
3bfd0c9c
AB
1170#else
1171 {MSR_SE, "SE"},
1172 {MSR_BE, "BE"},
1173#endif
06d67d54
PM
1174 {MSR_IR, "IR"},
1175 {MSR_DR, "DR"},
3bfd0c9c
AB
1176 {MSR_PMM, "PMM"},
1177#ifndef CONFIG_BOOKE
1178 {MSR_RI, "RI"},
1179 {MSR_LE, "LE"},
1180#endif
06d67d54
PM
1181 {0, NULL}
1182};
1183
801c0b2c 1184static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
06d67d54 1185{
801c0b2c 1186 const char *s = "";
06d67d54 1187
06d67d54
PM
1188 for (; bits->bit; ++bits)
1189 if (val & bits->bit) {
801c0b2c
MN
1190 printk("%s%s", s, bits->name);
1191 s = sep;
06d67d54 1192 }
801c0b2c
MN
1193}
1194
1195#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1196static struct regbit msr_tm_bits[] = {
1197 {MSR_TS_T, "T"},
1198 {MSR_TS_S, "S"},
1199 {MSR_TM, "E"},
1200 {0, NULL}
1201};
1202
1203static void print_tm_bits(unsigned long val)
1204{
1205/*
 1206 * This only prints something if at least one of the TM bits is set.
1207 * Inside the TM[], the output means:
1208 * E: Enabled (bit 32)
1209 * S: Suspended (bit 33)
1210 * T: Transactional (bit 34)
1211 */
1212 if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
1213 printk(",TM[");
1214 print_bits(val, msr_tm_bits, "");
1215 printk("]");
1216 }
1217}
1218#else
1219static void print_tm_bits(unsigned long val) {}
1220#endif
1221
1222static void print_msr_bits(unsigned long val)
1223{
1224 printk("<");
1225 print_bits(val, msr_bits, ",");
1226 print_tm_bits(val);
06d67d54
PM
1227 printk(">");
1228}
1229
1230#ifdef CONFIG_PPC64
f6f7dde3 1231#define REG "%016lx"
06d67d54
PM
1232#define REGS_PER_LINE 4
1233#define LAST_VOLATILE 13
1234#else
f6f7dde3 1235#define REG "%08lx"
06d67d54
PM
1236#define REGS_PER_LINE 8
1237#define LAST_VOLATILE 12
1238#endif
1239
14cf11af
PM
1240void show_regs(struct pt_regs * regs)
1241{
1242 int i, trap;
1243
a43cb95d
TH
1244 show_regs_print_info(KERN_DEFAULT);
1245
06d67d54
PM
1246 printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
1247 regs->nip, regs->link, regs->ctr);
1248 printk("REGS: %p TRAP: %04lx %s (%s)\n",
96b644bd 1249 regs, regs->trap, print_tainted(), init_utsname()->release);
06d67d54 1250 printk("MSR: "REG" ", regs->msr);
801c0b2c 1251 print_msr_bits(regs->msr);
f6f7dde3 1252 printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
14cf11af 1253 trap = TRAP(regs);
5115a026 1254 if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
9db8bcfd 1255 printk("CFAR: "REG" ", regs->orig_gpr3);
c5400649 1256 if (trap == 0x200 || trap == 0x300 || trap == 0x600)
ba28c9aa 1257#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
9db8bcfd 1258 printk("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
14170789 1259#else
9db8bcfd
AB
1260 printk("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
1261#endif
1262#ifdef CONFIG_PPC64
1263 printk("SOFTE: %ld ", regs->softe);
1264#endif
1265#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
6d888d1a
AB
1266 if (MSR_TM_ACTIVE(regs->msr))
1267 printk("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
14170789 1268#endif
14cf11af
PM
1269
1270 for (i = 0; i < 32; i++) {
06d67d54 1271 if ((i % REGS_PER_LINE) == 0)
a2367194 1272 printk("\nGPR%02d: ", i);
06d67d54
PM
1273 printk(REG " ", regs->gpr[i]);
1274 if (i == LAST_VOLATILE && !FULL_REGS(regs))
14cf11af
PM
1275 break;
1276 }
1277 printk("\n");
1278#ifdef CONFIG_KALLSYMS
1279 /*
 1280 * Look up the NIP late so we have the best chance of getting the
1281 * above info out without failing
1282 */
058c78f4
BH
1283 printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
1284 printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
afc07701 1285#endif
14cf11af 1286 show_stack(current, (unsigned long *) regs->gpr[1]);
06d67d54
PM
1287 if (!user_mode(regs))
1288 show_instructions(regs);
14cf11af
PM
1289}
1290
1291void exit_thread(void)
1292{
14cf11af
PM
1293}
1294
1295void flush_thread(void)
1296{
e0780b72 1297#ifdef CONFIG_HAVE_HW_BREAKPOINT
5aae8a53 1298 flush_ptrace_hw_breakpoint(current);
e0780b72 1299#else /* CONFIG_HAVE_HW_BREAKPOINT */
3bffb652 1300 set_debug_reg_defaults(&current->thread);
e0780b72 1301#endif /* CONFIG_HAVE_HW_BREAKPOINT */
14cf11af
PM
1302}
1303
1304void
1305release_thread(struct task_struct *t)
1306{
1307}
1308
1309/*
55ccf3fe
SS
1310 * this gets called so that we can store coprocessor state into memory and
1311 * copy the current task into the new thread.
14cf11af 1312 */
55ccf3fe 1313int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
14cf11af 1314{
579e633e 1315 flush_all_to_thread(src);
621b5060
MN
1316 /*
1317 * Flush TM state out so we can copy it. __switch_to_tm() does this
1318 * flush but it removes the checkpointed state from the current CPU and
1319 * transitions the CPU out of TM mode. Hence we need to call
1320 * tm_recheckpoint_new_task() (on the same task) to restore the
1321 * checkpointed state back and the TM mode.
1322 */
1323 __switch_to_tm(src);
1324 tm_recheckpoint_new_task(src);
330a1eb7 1325
55ccf3fe 1326 *dst = *src;
330a1eb7
ME
1327
1328 clear_task_ebb(dst);
1329
55ccf3fe 1330 return 0;
14cf11af
PM
1331}
1332
cec15488
ME
1333static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
1334{
1335#ifdef CONFIG_PPC_STD_MMU_64
1336 unsigned long sp_vsid;
1337 unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
1338
1339 if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
1340 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
1341 << SLB_VSID_SHIFT_1T;
1342 else
1343 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
1344 << SLB_VSID_SHIFT;
1345 sp_vsid |= SLB_VSID_KERNEL | llp;
1346 p->thread.ksp_vsid = sp_vsid;
1347#endif
1348}
1349
14cf11af
PM
1350/*
1351 * Copy a thread..
1352 */
efcac658 1353
6eca8933
AD
1354/*
1355 * Copy architecture-specific thread state
1356 */
6f2c55b8 1357int copy_thread(unsigned long clone_flags, unsigned long usp,
6eca8933 1358 unsigned long kthread_arg, struct task_struct *p)
14cf11af
PM
1359{
1360 struct pt_regs *childregs, *kregs;
1361 extern void ret_from_fork(void);
58254e10
AV
1362 extern void ret_from_kernel_thread(void);
1363 void (*f)(void);
0cec6fd1 1364 unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
14cf11af 1365
14cf11af
PM
1366 /* Copy registers */
1367 sp -= sizeof(struct pt_regs);
1368 childregs = (struct pt_regs *) sp;
ab75819d 1369 if (unlikely(p->flags & PF_KTHREAD)) {
6eca8933 1370 /* kernel thread */
138d1ce8 1371 struct thread_info *ti = (void *)task_stack_page(p);
58254e10 1372 memset(childregs, 0, sizeof(struct pt_regs));
14cf11af 1373 childregs->gpr[1] = sp + sizeof(struct pt_regs);
7cedd601
AB
1374 /* function */
1375 if (usp)
1376 childregs->gpr[14] = ppc_function_entry((void *)usp);
58254e10 1377#ifdef CONFIG_PPC64
b5e2fc1c 1378 clear_tsk_thread_flag(p, TIF_32BIT);
138d1ce8 1379 childregs->softe = 1;
06d67d54 1380#endif
6eca8933 1381 childregs->gpr[15] = kthread_arg;
14cf11af 1382 p->thread.regs = NULL; /* no user register state */
138d1ce8 1383 ti->flags |= _TIF_RESTOREALL;
58254e10 1384 f = ret_from_kernel_thread;
14cf11af 1385 } else {
6eca8933 1386 /* user thread */
afa86fc4 1387 struct pt_regs *regs = current_pt_regs();
58254e10
AV
1388 CHECK_FULL_REGS(regs);
1389 *childregs = *regs;
ea516b11
AV
1390 if (usp)
1391 childregs->gpr[1] = usp;
14cf11af 1392 p->thread.regs = childregs;
58254e10 1393 childregs->gpr[3] = 0; /* Result from fork() */
06d67d54
PM
1394 if (clone_flags & CLONE_SETTLS) {
1395#ifdef CONFIG_PPC64
9904b005 1396 if (!is_32bit_task())
06d67d54
PM
1397 childregs->gpr[13] = childregs->gpr[6];
1398 else
1399#endif
1400 childregs->gpr[2] = childregs->gpr[6];
1401 }
58254e10
AV
1402
1403 f = ret_from_fork;
14cf11af 1404 }
d272f667 1405 childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
14cf11af 1406 sp -= STACK_FRAME_OVERHEAD;
14cf11af
PM
1407
1408 /*
1409 * The way this works is that at some point in the future
1410 * some task will call _switch to switch to the new task.
1411 * That will pop off the stack frame created below and start
1412 * the new task running at ret_from_fork. The new task will
1413 * do some house keeping and then return from the fork or clone
1414 * system call, using the stack frame created above.
1415 */
af945cf4 1416 ((unsigned long *)sp)[0] = 0;
14cf11af
PM
1417 sp -= sizeof(struct pt_regs);
1418 kregs = (struct pt_regs *) sp;
1419 sp -= STACK_FRAME_OVERHEAD;
1420 p->thread.ksp = sp;
cbc9565e 1421#ifdef CONFIG_PPC32
85218827
KG
1422 p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
1423 _ALIGN_UP(sizeof(struct thread_info), 16);
cbc9565e 1424#endif
28d170ab
ON
1425#ifdef CONFIG_HAVE_HW_BREAKPOINT
1426 p->thread.ptrace_bps[0] = NULL;
1427#endif
1428
18461960
PM
1429 p->thread.fp_save_area = NULL;
1430#ifdef CONFIG_ALTIVEC
1431 p->thread.vr_save_area = NULL;
1432#endif
1433
cec15488
ME
1434 setup_ksp_vsid(p, sp);
1435
efcac658
AK
1436#ifdef CONFIG_PPC64
1437 if (cpu_has_feature(CPU_FTR_DSCR)) {
1021cb26 1438 p->thread.dscr_inherit = current->thread.dscr_inherit;
db1231dc 1439 p->thread.dscr = mfspr(SPRN_DSCR);
efcac658 1440 }
92779245
HM
1441 if (cpu_has_feature(CPU_FTR_HAS_PPR))
1442 p->thread.ppr = INIT_PPR;
efcac658 1443#endif
7cedd601 1444 kregs->nip = ppc_function_entry(f);
14cf11af
PM
1445 return 0;
1446}
1447
1448/*
1449 * Set up a thread for executing a new program
1450 */
06d67d54 1451void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
14cf11af 1452{
90eac727
ME
1453#ifdef CONFIG_PPC64
1454 unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
1455#endif
1456
06d67d54
PM
1457 /*
1458 * If we exec out of a kernel thread then thread.regs will not be
1459 * set. Do it now.
1460 */
1461 if (!current->thread.regs) {
0cec6fd1
AV
1462 struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
1463 current->thread.regs = regs - 1;
06d67d54
PM
1464 }
1465
14cf11af
PM
1466 memset(regs->gpr, 0, sizeof(regs->gpr));
1467 regs->ctr = 0;
1468 regs->link = 0;
1469 regs->xer = 0;
1470 regs->ccr = 0;
14cf11af 1471 regs->gpr[1] = sp;
06d67d54 1472
474f8196
RM
1473 /*
1474 * We have just cleared all the nonvolatile GPRs, so make
1475 * FULL_REGS(regs) return true. This is necessary to allow
1476 * ptrace to examine the thread immediately after exec.
1477 */
1478 regs->trap &= ~1UL;
1479
06d67d54
PM
1480#ifdef CONFIG_PPC32
1481 regs->mq = 0;
1482 regs->nip = start;
14cf11af 1483 regs->msr = MSR_USER;
06d67d54 1484#else
9904b005 1485 if (!is_32bit_task()) {
94af3abf 1486 unsigned long entry;
06d67d54 1487
94af3abf
RR
1488 if (is_elf2_task()) {
1489 /* Look ma, no function descriptors! */
1490 entry = start;
06d67d54 1491
94af3abf
RR
1492 /*
1493 * Ulrich says:
1494 * The latest iteration of the ABI requires that when
1495 * calling a function (at its global entry point),
1496 * the caller must ensure r12 holds the entry point
1497 * address (so that the function can quickly
1498 * establish addressability).
1499 */
1500 regs->gpr[12] = start;
1501 /* Make sure that's restored on entry to userspace. */
1502 set_thread_flag(TIF_RESTOREALL);
1503 } else {
1504 unsigned long toc;
1505
1506 /* start is a relocated pointer to the function
1507 * descriptor for the elf _start routine. The first
1508 * entry in the function descriptor is the entry
1509 * address of _start and the second entry is the TOC
1510 * value we need to use.
1511 */
1512 __get_user(entry, (unsigned long __user *)start);
1513 __get_user(toc, (unsigned long __user *)start+1);
1514
1515 /* Check whether the e_entry function descriptor entries
1516 * need to be relocated before we can use them.
1517 */
1518 if (load_addr != 0) {
1519 entry += load_addr;
1520 toc += load_addr;
1521 }
1522 regs->gpr[2] = toc;
06d67d54
PM
1523 }
1524 regs->nip = entry;
06d67d54 1525 regs->msr = MSR_USER64;
d4bf9a78
SR
1526 } else {
1527 regs->nip = start;
1528 regs->gpr[2] = 0;
1529 regs->msr = MSR_USER32;
06d67d54
PM
1530 }
1531#endif
ce48b210
MN
1532#ifdef CONFIG_VSX
1533 current->thread.used_vsr = 0;
1534#endif
de79f7b9 1535 memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
18461960 1536 current->thread.fp_save_area = NULL;
14cf11af 1537#ifdef CONFIG_ALTIVEC
de79f7b9
PM
1538 memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
1539 current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
18461960 1540 current->thread.vr_save_area = NULL;
14cf11af
PM
1541 current->thread.vrsave = 0;
1542 current->thread.used_vr = 0;
1543#endif /* CONFIG_ALTIVEC */
1544#ifdef CONFIG_SPE
1545 memset(current->thread.evr, 0, sizeof(current->thread.evr));
1546 current->thread.acc = 0;
1547 current->thread.spefscr = 0;
1548 current->thread.used_spe = 0;
1549#endif /* CONFIG_SPE */
bc2a9408
MN
1550#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1551 if (cpu_has_feature(CPU_FTR_TM))
1552 regs->msr |= MSR_TM;
1553 current->thread.tm_tfhar = 0;
1554 current->thread.tm_texasr = 0;
1555 current->thread.tm_tfiar = 0;
1556#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
14cf11af 1557}
e1802b06 1558EXPORT_SYMBOL(start_thread);
14cf11af
PM
1559
1560#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
1561 | PR_FP_EXC_RES | PR_FP_EXC_INV)
1562
1563int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
1564{
1565 struct pt_regs *regs = tsk->thread.regs;
1566
1567 /* This is a bit hairy. If we are an SPE enabled processor
1568 * (have embedded fp) we store the IEEE exception enable flags in
1569 * fpexc_mode. fpexc_mode is also used for setting FP exception
 1570 * mode (async, precise, disabled) for 'Classic' FP. */
1571 if (val & PR_FP_EXC_SW_ENABLE) {
1572#ifdef CONFIG_SPE
5e14d21e 1573 if (cpu_has_feature(CPU_FTR_SPE)) {
640e9225
JM
1574 /*
1575 * When the sticky exception bits are set
1576 * directly by userspace, it must call prctl
1577 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
1578 * in the existing prctl settings) or
1579 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
1580 * the bits being set). <fenv.h> functions
1581 * saving and restoring the whole
1582 * floating-point environment need to do so
1583 * anyway to restore the prctl settings from
1584 * the saved environment.
1585 */
1586 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
5e14d21e
KG
1587 tsk->thread.fpexc_mode = val &
1588 (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
1589 return 0;
1590 } else {
1591 return -EINVAL;
1592 }
14cf11af
PM
1593#else
1594 return -EINVAL;
1595#endif
14cf11af 1596 }
06d67d54
PM
1597
1598 /* on a CONFIG_SPE this does not hurt us. The bits that
1599 * __pack_fe01 use do not overlap with bits used for
1600 * PR_FP_EXC_SW_ENABLE. Additionally, the MSR[FE0,FE1] bits
1601 * on CONFIG_SPE implementations are reserved so writing to
1602 * them does not change anything */
1603 if (val > PR_FP_EXC_PRECISE)
1604 return -EINVAL;
1605 tsk->thread.fpexc_mode = __pack_fe01(val);
1606 if (regs != NULL && (regs->msr & MSR_FP) != 0)
1607 regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
1608 | tsk->thread.fpexc_mode;
14cf11af
PM
1609 return 0;
1610}
1611
1612int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
1613{
1614 unsigned int val;
1615
1616 if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
1617#ifdef CONFIG_SPE
640e9225
JM
1618 if (cpu_has_feature(CPU_FTR_SPE)) {
1619 /*
1620 * When the sticky exception bits are set
1621 * directly by userspace, it must call prctl
1622 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
1623 * in the existing prctl settings) or
1624 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
1625 * the bits being set). <fenv.h> functions
1626 * saving and restoring the whole
1627 * floating-point environment need to do so
1628 * anyway to restore the prctl settings from
1629 * the saved environment.
1630 */
1631 tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
5e14d21e 1632 val = tsk->thread.fpexc_mode;
640e9225 1633 } else
5e14d21e 1634 return -EINVAL;
14cf11af
PM
1635#else
1636 return -EINVAL;
1637#endif
1638 else
1639 val = __unpack_fe01(tsk->thread.fpexc_mode);
1640 return put_user(val, (unsigned int __user *) adr);
1641}
1642
fab5db97
PM
1643int set_endian(struct task_struct *tsk, unsigned int val)
1644{
1645 struct pt_regs *regs = tsk->thread.regs;
1646
1647 if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
1648 (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
1649 return -EINVAL;
1650
1651 if (regs == NULL)
1652 return -EINVAL;
1653
1654 if (val == PR_ENDIAN_BIG)
1655 regs->msr &= ~MSR_LE;
1656 else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
1657 regs->msr |= MSR_LE;
1658 else
1659 return -EINVAL;
1660
1661 return 0;
1662}
1663
1664int get_endian(struct task_struct *tsk, unsigned long adr)
1665{
1666 struct pt_regs *regs = tsk->thread.regs;
1667 unsigned int val;
1668
1669 if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
1670 !cpu_has_feature(CPU_FTR_REAL_LE))
1671 return -EINVAL;
1672
1673 if (regs == NULL)
1674 return -EINVAL;
1675
1676 if (regs->msr & MSR_LE) {
1677 if (cpu_has_feature(CPU_FTR_REAL_LE))
1678 val = PR_ENDIAN_LITTLE;
1679 else
1680 val = PR_ENDIAN_PPC_LITTLE;
1681 } else
1682 val = PR_ENDIAN_BIG;
1683
1684 return put_user(val, (unsigned int __user *)adr);
1685}
1686
e9370ae1
PM
1687int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
1688{
1689 tsk->thread.align_ctl = val;
1690 return 0;
1691}
1692
1693int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
1694{
1695 return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
1696}
1697
bb72c481
PM
1698static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
1699 unsigned long nbytes)
1700{
1701 unsigned long stack_page;
1702 unsigned long cpu = task_cpu(p);
1703
1704 /*
1705 * Avoid crashing if the stack has overflowed and corrupted
1706 * task_cpu(p), which is in the thread_info struct.
1707 */
1708 if (cpu < NR_CPUS && cpu_possible(cpu)) {
1709 stack_page = (unsigned long) hardirq_ctx[cpu];
1710 if (sp >= stack_page + sizeof(struct thread_struct)
1711 && sp <= stack_page + THREAD_SIZE - nbytes)
1712 return 1;
1713
1714 stack_page = (unsigned long) softirq_ctx[cpu];
1715 if (sp >= stack_page + sizeof(struct thread_struct)
1716 && sp <= stack_page + THREAD_SIZE - nbytes)
1717 return 1;
1718 }
1719 return 0;
1720}
1721
2f25194d 1722int validate_sp(unsigned long sp, struct task_struct *p,
14cf11af
PM
1723 unsigned long nbytes)
1724{
0cec6fd1 1725 unsigned long stack_page = (unsigned long)task_stack_page(p);
14cf11af
PM
1726
1727 if (sp >= stack_page + sizeof(struct thread_struct)
1728 && sp <= stack_page + THREAD_SIZE - nbytes)
1729 return 1;
1730
bb72c481 1731 return valid_irq_stack(sp, p, nbytes);
14cf11af
PM
1732}
1733
2f25194d
AB
1734EXPORT_SYMBOL(validate_sp);
1735
14cf11af
PM
1736unsigned long get_wchan(struct task_struct *p)
1737{
1738 unsigned long ip, sp;
1739 int count = 0;
1740
1741 if (!p || p == current || p->state == TASK_RUNNING)
1742 return 0;
1743
1744 sp = p->thread.ksp;
ec2b36b9 1745 if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
14cf11af
PM
1746 return 0;
1747
1748 do {
1749 sp = *(unsigned long *)sp;
ec2b36b9 1750 if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
14cf11af
PM
1751 return 0;
1752 if (count > 0) {
ec2b36b9 1753 ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
14cf11af
PM
1754 if (!in_sched_functions(ip))
1755 return ip;
1756 }
1757 } while (count++ < 16);
1758 return 0;
1759}
06d67d54 1760
c4d04be1 1761static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;
06d67d54
PM
1762
1763void show_stack(struct task_struct *tsk, unsigned long *stack)
1764{
1765 unsigned long sp, ip, lr, newsp;
1766 int count = 0;
1767 int firstframe = 1;
6794c782
SR
1768#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1769 int curr_frame = current->curr_ret_stack;
1770 extern void return_to_handler(void);
9135c3cc 1771 unsigned long rth = (unsigned long)return_to_handler;
6794c782 1772#endif
06d67d54
PM
1773
1774 sp = (unsigned long) stack;
1775 if (tsk == NULL)
1776 tsk = current;
1777 if (sp == 0) {
1778 if (tsk == current)
acf620ec 1779 sp = current_stack_pointer();
06d67d54
PM
1780 else
1781 sp = tsk->thread.ksp;
1782 }
1783
1784 lr = 0;
1785 printk("Call Trace:\n");
1786 do {
ec2b36b9 1787 if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
06d67d54
PM
1788 return;
1789
1790 stack = (unsigned long *) sp;
1791 newsp = stack[0];
ec2b36b9 1792 ip = stack[STACK_FRAME_LR_SAVE];
06d67d54 1793 if (!firstframe || ip != lr) {
058c78f4 1794 printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
6794c782 1795#ifdef CONFIG_FUNCTION_GRAPH_TRACER
7d56c65a 1796 if ((ip == rth) && curr_frame >= 0) {
6794c782
SR
1797 printk(" (%pS)",
1798 (void *)current->ret_stack[curr_frame].ret);
1799 curr_frame--;
1800 }
1801#endif
06d67d54
PM
1802 if (firstframe)
1803 printk(" (unreliable)");
1804 printk("\n");
1805 }
1806 firstframe = 0;
1807
1808 /*
1809 * See if this is an exception frame.
1810 * We look for the "regshere" marker in the current frame.
1811 */
ec2b36b9
BH
1812 if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
1813 && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
06d67d54
PM
1814 struct pt_regs *regs = (struct pt_regs *)
1815 (sp + STACK_FRAME_OVERHEAD);
06d67d54 1816 lr = regs->link;
9be9be2e 1817 printk("--- interrupt: %lx at %pS\n LR = %pS\n",
058c78f4 1818 regs->trap, (void *)regs->nip, (void *)lr);
06d67d54
PM
1819 firstframe = 1;
1820 }
1821
1822 sp = newsp;
1823 } while (count++ < kstack_depth_to_print);
1824}
1825
cb2c9b27 1826#ifdef CONFIG_PPC64
fe1952fc 1827/* Called with hard IRQs off */
0e37739b 1828void notrace __ppc64_runlatch_on(void)
cb2c9b27 1829{
fe1952fc 1830 struct thread_info *ti = current_thread_info();
cb2c9b27
AB
1831 unsigned long ctrl;
1832
fe1952fc
BH
1833 ctrl = mfspr(SPRN_CTRLF);
1834 ctrl |= CTRL_RUNLATCH;
1835 mtspr(SPRN_CTRLT, ctrl);
cb2c9b27 1836
fae2e0fb 1837 ti->local_flags |= _TLF_RUNLATCH;
cb2c9b27
AB
1838}
1839
fe1952fc 1840/* Called with hard IRQs off */
0e37739b 1841void notrace __ppc64_runlatch_off(void)
cb2c9b27 1842{
fe1952fc 1843 struct thread_info *ti = current_thread_info();
cb2c9b27
AB
1844 unsigned long ctrl;
1845
fae2e0fb 1846 ti->local_flags &= ~_TLF_RUNLATCH;
cb2c9b27 1847
4138d653
AB
1848 ctrl = mfspr(SPRN_CTRLF);
1849 ctrl &= ~CTRL_RUNLATCH;
1850 mtspr(SPRN_CTRLT, ctrl);
cb2c9b27 1851}
fe1952fc 1852#endif /* CONFIG_PPC64 */
f6a61680 1853
d839088c
AB
1854unsigned long arch_align_stack(unsigned long sp)
1855{
1856 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1857 sp -= get_random_int() & ~PAGE_MASK;
1858 return sp & ~0xf;
1859}
912f9ee2
AB
1860
1861static inline unsigned long brk_rnd(void)
1862{
1863 unsigned long rnd = 0;
1864
1865 /* 8MB for 32bit, 1GB for 64bit */
1866 if (is_32bit_task())
1867 rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
1868 else
1869 rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
1870
1871 return rnd << PAGE_SHIFT;
1872}
1873
1874unsigned long arch_randomize_brk(struct mm_struct *mm)
1875{
8bbde7a7
AB
1876 unsigned long base = mm->brk;
1877 unsigned long ret;
1878
ce7a35c7 1879#ifdef CONFIG_PPC_STD_MMU_64
8bbde7a7
AB
1880 /*
1881 * If we are using 1TB segments and we are allowed to randomise
1882 * the heap, we can put it above 1TB so it is backed by a 1TB
1883 * segment. Otherwise the heap will be in the bottom 1TB
1884 * which always uses 256MB segments and this may result in a
1885 * performance penalty.
1886 */
1887 if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
1888 base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
1889#endif
1890
1891 ret = PAGE_ALIGN(base + brk_rnd());
912f9ee2
AB
1892
1893 if (ret < mm->brk)
1894 return mm->brk;
1895
1896 return ret;
1897}
501cb16d 1898