[PATCH] x86_64: eliminate set_debug()
/*
 * linux/arch/x86-64/kernel/process.c
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * X86-64 port
 *	Andi Kleen.
 *
 * CPU hotplug support - ashok.raj@intel.com
 * $Id: process.c,v 1.38 2002/01/15 10:08:03 ak Exp $
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */

#include <stdarg.h>

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/module.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/random.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
#include <asm/pda.h>
#include <asm/prctl.h>
#include <asm/kdebug.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>

asmlinkage extern void ret_from_fork(void);

unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);

static struct notifier_block *idle_notifier;
static DEFINE_SPINLOCK(idle_notifier_lock);

void idle_notifier_register(struct notifier_block *n)
{
        unsigned long flags;
        spin_lock_irqsave(&idle_notifier_lock, flags);
        notifier_chain_register(&idle_notifier, n);
        spin_unlock_irqrestore(&idle_notifier_lock, flags);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
        unsigned long flags;
        spin_lock_irqsave(&idle_notifier_lock, flags);
        notifier_chain_unregister(&idle_notifier, n);
        spin_unlock_irqrestore(&idle_notifier_lock, flags);
}
EXPORT_SYMBOL(idle_notifier_unregister);

enum idle_state { CPU_IDLE, CPU_NOT_IDLE };
static DEFINE_PER_CPU(enum idle_state, idle_state) = CPU_NOT_IDLE;

void enter_idle(void)
{
        __get_cpu_var(idle_state) = CPU_IDLE;
        notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
        __get_cpu_var(idle_state) = CPU_NOT_IDLE;
        notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
        if (current->pid | read_pda(irqcount))
                return;
        __exit_idle();
}

/*
 * We use this if we don't have any better
 * idle routine..
 */
static void default_idle(void)
{
        local_irq_enable();

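        /*
         * Stop polling: wakers must now send a reschedule IPI.  Re-check
         * need_resched() with interrupts disabled before halting, so a
         * wakeup cannot slip in between the check and the hlt.
         */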
        clear_thread_flag(TIF_POLLING_NRFLAG);
        smp_mb__after_clear_bit();
        while (!need_resched()) {
                local_irq_disable();
                if (!need_resched())
                        safe_halt();
                else
                        local_irq_enable();
        }
        set_thread_flag(TIF_POLLING_NRFLAG);
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle (void)
{
        local_irq_enable();

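        /*
         * Spin on TIF_NEED_RESCHED; "rep; nop" is the PAUSE hint, which
         * keeps the busy-wait cheap on SMT siblings.
         */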
        asm volatile(
                "2:"
                "testl %0,%1;"
                "rep; nop;"
                "je 2b;"
                : :
                "i" (_TIF_NEED_RESCHED),
                "m" (current_thread_info()->flags));
}

void cpu_idle_wait(void)
{
        unsigned int cpu, this_cpu = get_cpu();
        cpumask_t map;

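        /*
         * Mark every online CPU, then wait: each CPU clears its own
         * cpu_idle_state flag the next time it passes through cpu_idle(),
         * so once the map empties all CPUs have noticed the new idle routine.
         */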
        set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
        put_cpu();

        cpus_clear(map);
        for_each_online_cpu(cpu) {
                per_cpu(cpu_idle_state, cpu) = 1;
                cpu_set(cpu, map);
        }

        __get_cpu_var(cpu_idle_state) = 0;

        wmb();
        do {
                ssleep(1);
                for_each_online_cpu(cpu) {
                        if (cpu_isset(cpu, map) &&
                                        !per_cpu(cpu_idle_state, cpu))
                                cpu_clear(cpu, map);
                }
                cpus_and(map, map, cpu_online_map);
        } while (!cpus_empty(map));
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);

#ifdef CONFIG_HOTPLUG_CPU
DECLARE_PER_CPU(int, cpu_state);

#include <asm/nmi.h>
/* We halt the CPU with physical CPU hotplug */
static inline void play_dead(void)
{
        idle_task_exit();
        wbinvd();
        mb();
        /* Ack it */
        __get_cpu_var(cpu_state) = CPU_DEAD;

        local_irq_disable();
        while (1)
                halt();
}
#else
static inline void play_dead(void)
{
        BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle (void)
{
        set_thread_flag(TIF_POLLING_NRFLAG);

        /* endless idle loop with no priority at all */
        while (1) {
                while (!need_resched()) {
                        void (*idle)(void);

                        if (__get_cpu_var(cpu_idle_state))
                                __get_cpu_var(cpu_idle_state) = 0;

                        rmb();
                        idle = pm_idle;
                        if (!idle)
                                idle = default_idle;
                        if (cpu_is_offline(smp_processor_id()))
                                play_dead();
                        enter_idle();
                        idle();
                        __exit_idle();
                }

                preempt_enable_no_resched();
                schedule();
                preempt_disable();
        }
}

/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 */
static void mwait_idle(void)
{
        local_irq_enable();

        while (!need_resched()) {
                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (need_resched())
                        break;
                __mwait(0, 0);
        }
}

void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
        static int printed;
        if (cpu_has(c, X86_FEATURE_MWAIT)) {
                /*
                 * Skip, if setup has overridden idle.
                 * One CPU supports mwait => All CPUs support mwait
                 */
                if (!pm_idle) {
                        if (!printed) {
                                printk("using mwait in idle threads.\n");
                                printed = 1;
                        }
                        pm_idle = mwait_idle;
                }
        }
}

static int __init idle_setup (char *str)
{
        if (!strncmp(str, "poll", 4)) {
                printk("using polling idle threads.\n");
                pm_idle = poll_idle;
        }

        boot_option_idle_override = 1;
        return 1;
}

__setup("idle=", idle_setup);

/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs * regs)
{
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
        unsigned int fsindex,gsindex;
        unsigned int ds,cs,es;

        printk("\n");
        print_modules();
        printk("Pid: %d, comm: %.20s %s %s %.*s\n",
                current->pid, current->comm, print_tainted(),
                system_utsname.release,
                (int)strcspn(system_utsname.version, " "),
                system_utsname.version);
        printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
        printk_address(regs->rip);
        printk("\nRSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp,
                regs->eflags);
        printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
                regs->rax, regs->rbx, regs->rcx);
        printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
                regs->rdx, regs->rsi, regs->rdi);
        printk("RBP: %016lx R08: %016lx R09: %016lx\n",
                regs->rbp, regs->r8, regs->r9);
        printk("R10: %016lx R11: %016lx R12: %016lx\n",
                regs->r10, regs->r11, regs->r12);
        printk("R13: %016lx R14: %016lx R15: %016lx\n",
                regs->r13, regs->r14, regs->r15);

        asm("movl %%ds,%0" : "=r" (ds));
        asm("movl %%cs,%0" : "=r" (cs));
        asm("movl %%es,%0" : "=r" (es));
        asm("movl %%fs,%0" : "=r" (fsindex));
        asm("movl %%gs,%0" : "=r" (gsindex));

        rdmsrl(MSR_FS_BASE, fs);
        rdmsrl(MSR_GS_BASE, gs);
        rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

        asm("movq %%cr0, %0": "=r" (cr0));
        asm("movq %%cr2, %0": "=r" (cr2));
        asm("movq %%cr3, %0": "=r" (cr3));
        asm("movq %%cr4, %0": "=r" (cr4));

        printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
                fs,fsindex,gs,gsindex,shadowgs);
        printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0);
        printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);
}

void show_regs(struct pt_regs *regs)
{
        printk("CPU %d:", smp_processor_id());
        __show_regs(regs);
        show_trace(&regs->rsp);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
        struct task_struct *me = current;
        struct thread_struct *t = &me->thread;

        /*
         * Remove function-return probe instances associated with this task
         * and put them back on the free list. Do not insert an exit probe for
         * this function, it will be disabled by kprobe_flush_task if you do.
         */
        kprobe_flush_task(me);

        if (me->thread.io_bitmap_ptr) {
                struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

                kfree(t->io_bitmap_ptr);
                t->io_bitmap_ptr = NULL;
                /*
                 * Careful, clear this in the TSS too:
                 */
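                /*
                 * A set bit in the TSS I/O bitmap means "port not
                 * accessible", so 0xff revokes the whole tracked range.
                 */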
                memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
                t->io_bitmap_max = 0;
                put_cpu();
        }
}

void flush_thread(void)
{
        struct task_struct *tsk = current;
        struct thread_info *t = current_thread_info();

        if (t->flags & _TIF_ABI_PENDING)
                t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);

        tsk->thread.debugreg0 = 0;
        tsk->thread.debugreg1 = 0;
        tsk->thread.debugreg2 = 0;
        tsk->thread.debugreg3 = 0;
        tsk->thread.debugreg6 = 0;
        tsk->thread.debugreg7 = 0;
        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
        /*
         * Forget coprocessor state..
         */
        clear_fpu(tsk);
        clear_used_math();
}

void release_thread(struct task_struct *dead_task)
{
        if (dead_task->mm) {
                if (dead_task->mm->context.size) {
                        printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
                                dead_task->comm,
                                dead_task->mm->context.ldt,
                                dead_task->mm->context.size);
                        BUG();
                }
        }
}

static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
        struct user_desc ud = {
                .base_addr = addr,
                .limit = 0xfffff,
                .seg_32bit = 1,
                .limit_in_pages = 1,
                .useable = 1,
        };
        struct n_desc_struct *desc = (void *)t->thread.tls_array;
        desc += tls;
        desc->a = LDT_entry_a(&ud);
        desc->b = LDT_entry_b(&ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
        struct desc_struct *desc = (void *)t->thread.tls_array;
        desc += tls;
        return desc->base0 |
                (((u32)desc->base1) << 16) |
                (((u32)desc->base2) << 24);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
        unlazy_fpu(tsk);
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
                unsigned long unused,
                struct task_struct * p, struct pt_regs * regs)
{
        int err;
        struct pt_regs * childregs;
        struct task_struct *me = current;

        childregs = ((struct pt_regs *)
                        (THREAD_SIZE + task_stack_page(p))) - 1;
        *childregs = *regs;

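        /*
         * The child sees 0 as the fork/clone return value: rax is its
         * return register.  An rsp of ~0UL marks a kernel thread with no
         * user stack; point it at its own pt_regs instead.
         */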
        childregs->rax = 0;
        childregs->rsp = rsp;
        if (rsp == ~0UL)
                childregs->rsp = (unsigned long)childregs;

        p->thread.rsp = (unsigned long) childregs;
        p->thread.rsp0 = (unsigned long) (childregs+1);
        p->thread.userrsp = me->thread.userrsp;

        set_tsk_thread_flag(p, TIF_FORK);

        p->thread.fs = me->thread.fs;
        p->thread.gs = me->thread.gs;

        asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
        asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
        asm("mov %%es,%0" : "=m" (p->thread.es));
        asm("mov %%ds,%0" : "=m" (p->thread.ds));

        if (unlikely(me->thread.io_bitmap_ptr != NULL)) {
                p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
                if (!p->thread.io_bitmap_ptr) {
                        p->thread.io_bitmap_max = 0;
                        return -ENOMEM;
                }
                memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
                                IO_BITMAP_BYTES);
        }

        /*
         * Set a new TLS for the child thread?
         */
        if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
                if (test_thread_flag(TIF_IA32))
                        err = ia32_child_tls(p, childregs);
                else
#endif
                        err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
                if (err)
                        goto out;
        }
        err = 0;
out:
        if (err && p->thread.io_bitmap_ptr) {
                kfree(p->thread.io_bitmap_ptr);
                p->thread.io_bitmap_max = 0;
        }
        return err;
}

/*
 * This special macro can be used to load a debugging register
 */
#define loaddebug(thread,r) set_debugreg(thread->debugreg ## r, r)
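/* e.g. loaddebug(next, 7) expands to set_debugreg(next->debugreg7, 7) */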

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 */
__kprobes struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
        struct thread_struct *prev = &prev_p->thread,
                             *next = &next_p->thread;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);

        unlazy_fpu(prev_p);

        /*
         * Reload esp0, LDT and the page table pointer:
         */
        tss->rsp0 = next->rsp0;

        /*
         * Switch DS and ES.
         * This won't pick up thread selector changes, but I guess that is ok.
         */
        asm volatile("mov %%es,%0" : "=m" (prev->es));
        if (unlikely(next->es | prev->es))
                loadsegment(es, next->es);

        asm volatile ("mov %%ds,%0" : "=m" (prev->ds));
        if (unlikely(next->ds | prev->ds))
                loadsegment(ds, next->ds);

        load_TLS(next, cpu);

        /*
         * Switch FS and GS.
         */
        {
                unsigned fsindex;
                asm volatile("movl %%fs,%0" : "=r" (fsindex));
                /* segment register != 0 always requires a reload.
                   also reload when it has changed.
                   when prev process used 64bit base always reload
                   to avoid an information leak. */
                if (unlikely(fsindex | next->fsindex | prev->fs)) {
                        loadsegment(fs, next->fsindex);
                        /* check if the user used a selector != 0
                         * if yes clear 64bit base, since overloaded base
                         * is always mapped to the Null selector
                         */
                        if (fsindex)
                                prev->fs = 0;
                }
                /* when next process has a 64bit base use it */
                if (next->fs)
                        wrmsrl(MSR_FS_BASE, next->fs);
                prev->fsindex = fsindex;
        }
        {
                unsigned gsindex;
                asm volatile("movl %%gs,%0" : "=r" (gsindex));
                if (unlikely(gsindex | next->gsindex | prev->gs)) {
                        load_gs_index(next->gsindex);
                        if (gsindex)
                                prev->gs = 0;
                }
                if (next->gs)
                        wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
                prev->gsindex = gsindex;
        }

        /*
         * Switch the PDA context.
         */
        prev->userrsp = read_pda(oldrsp);
        write_pda(oldrsp, next->userrsp);
        write_pda(pcurrent, next_p);
        write_pda(kernelstack,
                  task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET);

        /*
         * Now maybe reload the debug registers
         */
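        /* debugreg7 holds the enable bits: zero means no hardware
           breakpoints are armed, so the reloads below can be skipped. */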
        if (unlikely(next->debugreg7)) {
                loaddebug(next, 0);
                loaddebug(next, 1);
                loaddebug(next, 2);
                loaddebug(next, 3);
                /* no 4 and 5 */
                loaddebug(next, 6);
                loaddebug(next, 7);
        }

        /*
         * Handle the IO bitmap
         */
        if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
                if (next->io_bitmap_ptr)
                        /*
                         * Copy the relevant range of the IO bitmap.
                         * Normally this is 128 bytes or less:
                         */
                        memcpy(tss->io_bitmap, next->io_bitmap_ptr,
                                max(prev->io_bitmap_max, next->io_bitmap_max));
                else {
                        /*
                         * Clear any possible leftover bits:
                         */
                        memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
                }
        }

        return prev_p;
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage
long sys_execve(char __user *name, char __user * __user *argv,
                char __user * __user *envp, struct pt_regs regs)
{
        long error;
        char * filename;

        filename = getname(name);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                return error;
        error = do_execve(filename, argv, envp, &regs);
        if (error == 0) {
                task_lock(current);
                current->ptrace &= ~PT_DTRACE;
                task_unlock(current);
        }
        putname(filename);
        return error;
}

void set_personality_64bit(void)
{
        /* inherit personality from parent */

        /* Make sure to be in 64bit mode */
        clear_thread_flag(TIF_IA32);

        /* TBD: overwrites user setup. Should have two bits.
           But 64bit processes have always behaved this way,
           so it's not too bad. The main problem is just that
           32-bit children are affected again. */
        current->personality &= ~READ_IMPLIES_EXEC;
}

asmlinkage long sys_fork(struct pt_regs *regs)
{
        return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
}

asmlinkage long
sys_clone(unsigned long clone_flags, unsigned long newsp,
          void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
        if (!newsp)
                newsp = regs->rsp;
        return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage long sys_vfork(struct pt_regs *regs)
{
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->rsp, regs, 0,
                    NULL, NULL);
}

unsigned long get_wchan(struct task_struct *p)
{
        unsigned long stack;
        u64 fp,rip;
        int count = 0;

        if (!p || p == current || p->state==TASK_RUNNING)
                return 0;
        stack = (unsigned long)task_stack_page(p);
        if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
                return 0;
        fp = *(u64 *)(p->thread.rsp);
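        /*
         * Walk the saved frame pointers: the return address sits at fp+8,
         * the caller's frame pointer at *fp.  Give up after 16 frames or
         * if the chain leaves this task's stack.
         */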
        do {
                if (fp < (unsigned long)stack ||
                    fp > (unsigned long)stack+THREAD_SIZE)
                        return 0;
                rip = *(u64 *)(fp+8);
                if (!in_sched_functions(rip))
                        return rip;
                fp = *(u64 *)fp;
        } while (count++ < 16);
        return 0;
}

long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
        int ret = 0;
        int doit = task == current;
        int cpu;

        switch (code) {
        case ARCH_SET_GS:
                if (addr >= TASK_SIZE_OF(task))
                        return -EPERM;
                cpu = get_cpu();
                /* handle small bases via the GDT because that's faster to
                   switch. */
                if (addr <= 0xffffffff) {
                        set_32bit_tls(task, GS_TLS, addr);
                        if (doit) {
                                load_TLS(&task->thread, cpu);
                                load_gs_index(GS_TLS_SEL);
                        }
                        task->thread.gsindex = GS_TLS_SEL;
                        task->thread.gs = 0;
                } else {
                        task->thread.gsindex = 0;
                        task->thread.gs = addr;
                        if (doit) {
                                load_gs_index(0);
                                ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
                        }
                }
                put_cpu();
                break;
        case ARCH_SET_FS:
                /* Not strictly needed for fs, but do it for symmetry
                   with gs */
                if (addr >= TASK_SIZE_OF(task))
                        return -EPERM;
                cpu = get_cpu();
                /* handle small bases via the GDT because that's faster to
                   switch. */
                if (addr <= 0xffffffff) {
                        set_32bit_tls(task, FS_TLS, addr);
                        if (doit) {
                                load_TLS(&task->thread, cpu);
                                asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
                        }
                        task->thread.fsindex = FS_TLS_SEL;
                        task->thread.fs = 0;
                } else {
                        task->thread.fsindex = 0;
                        task->thread.fs = addr;
                        if (doit) {
                                /* set the selector to 0 to not confuse
                                   __switch_to */
                                asm volatile("movl %0,%%fs" :: "r" (0));
                                ret = checking_wrmsrl(MSR_FS_BASE, addr);
                        }
                }
                put_cpu();
                break;
        case ARCH_GET_FS: {
                unsigned long base;
                if (task->thread.fsindex == FS_TLS_SEL)
                        base = read_32bit_tls(task, FS_TLS);
                else if (doit)
                        rdmsrl(MSR_FS_BASE, base);
                else
                        base = task->thread.fs;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }
        case ARCH_GET_GS: {
                unsigned long base;
                if (task->thread.gsindex == GS_TLS_SEL)
                        base = read_32bit_tls(task, GS_TLS);
                else if (doit)
                        rdmsrl(MSR_KERNEL_GS_BASE, base);
                else
                        base = task->thread.gs;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
        return do_arch_prctl(current, code, addr);
}

/*
 * Capture the user space registers if the task is not running (in user space)
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
        struct pt_regs *pp, ptregs;

        pp = task_pt_regs(tsk);

        ptregs = *pp;
        ptregs.cs &= 0xffff;
        ptregs.ss &= 0xffff;

        elf_core_copy_regs(regs, &ptregs);

        return 1;
}

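/*
 * Randomize the initial user stack pointer by up to 8 kB, then keep it
 * 16-byte aligned.
 */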
unsigned long arch_align_stack(unsigned long sp)
{
        if (randomize_va_space)
                sp -= get_random_int() % 8192;
        return sp & ~0xf;
}