/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/stackprotector.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/tick.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>
#include <linux/cpuidle.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>

asmlinkage extern void ret_from_fork(void);

DEFINE_PER_CPU(unsigned long, old_rsp);
static DEFINE_PER_CPU(unsigned char, is_idle);

static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
        atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
        atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
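
/*
 * Illustrative sketch (not part of the original file): a driver that
 * wants to be told when this CPU enters or leaves idle could hook the
 * chain roughly as below.  my_idle_notify and my_idle_nb are
 * hypothetical names; IDLE_START/IDLE_END come from <asm/idle.h>.
 *
 *	static int my_idle_notify(struct notifier_block *nb,
 *				  unsigned long action, void *unused)
 *	{
 *		if (action == IDLE_START)
 *			;	// CPU is about to go idle
 *		else if (action == IDLE_END)
 *			;	// CPU has left idle
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_idle_nb = {
 *		.notifier_call = my_idle_notify,
 *	};
 *
 *	idle_notifier_register(&my_idle_nb);
 *	...
 *	idle_notifier_unregister(&my_idle_nb);
 */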

void enter_idle(void)
{
        percpu_write(is_idle, 1);
        atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
        if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
                return;
        atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
        /* idle loop has pid 0 */
        if (current->pid)
                return;
        __exit_idle();
}

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
        BUG();
}
#endif

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
        current_thread_info()->status |= TS_POLLING;

        /*
         * If we're the non-boot CPU, nothing set the stack canary up
         * for us.  CPU0 already has it initialized but no harm in
         * doing it again.  This is a good place for updating it, as
         * we won't ever return from this function (so the invalid
         * canaries already on the stack won't ever trigger).
         */
        boot_init_stack_canary();

        /* endless idle loop with no priority at all */
        while (1) {
                tick_nohz_idle_enter();
                while (!need_resched()) {

                        rmb();

                        if (cpu_is_offline(smp_processor_id()))
                                play_dead();
                        /*
                         * Idle routines should keep interrupts disabled
                         * from here on, until they go to idle.
                         * Otherwise, idle callbacks can misfire.
                         */
                        local_touch_nmi();
                        local_irq_disable();
                        enter_idle();
                        /* Don't trace irqs off for idle */
                        stop_critical_timings();

                        /* enter_idle() needs rcu for notifiers */
                        rcu_idle_enter();

                        if (cpuidle_idle_call())
                                pm_idle();

                        rcu_idle_exit();
                        start_critical_timings();

                        /* In many cases the interrupt that ended idle
                           has already called exit_idle. But some idle
                           loops can be woken up without interrupt. */
                        __exit_idle();
                }

                tick_nohz_idle_exit();
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
        }
}
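
/*
 * Note (added commentary, not in the original file): the TS_POLLING
 * flag set at the top of cpu_idle() is the wakeup contract for this
 * loop.  A remote CPU waking a task here only has to make
 * TIF_NEED_RESCHED visible; the inner while (!need_resched()) test
 * observes it after the rmb() and falls through to schedule(), so no
 * reschedule IPI is required while the CPU is polling.
 */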

/* Also prints some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
        unsigned long d0, d1, d2, d3, d6, d7;
        unsigned int fsindex, gsindex;
        unsigned int ds, cs, es;

        show_regs_common();
        printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
        printk_address(regs->ip, 1);
        printk(KERN_DEFAULT "RSP: %04lx:%016lx  EFLAGS: %08lx\n", regs->ss,
                        regs->sp, regs->flags);
        printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
               regs->ax, regs->bx, regs->cx);
        printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
               regs->dx, regs->si, regs->di);
        printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
               regs->bp, regs->r8, regs->r9);
        printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
               regs->r10, regs->r11, regs->r12);
        printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
               regs->r13, regs->r14, regs->r15);

        asm("movl %%ds,%0" : "=r" (ds));
        asm("movl %%cs,%0" : "=r" (cs));
        asm("movl %%es,%0" : "=r" (es));
        asm("movl %%fs,%0" : "=r" (fsindex));
        asm("movl %%gs,%0" : "=r" (gsindex));

        rdmsrl(MSR_FS_BASE, fs);
        rdmsrl(MSR_GS_BASE, gs);
        rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

        if (!all)
                return;

        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = read_cr3();
        cr4 = read_cr4();

        printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
               fs, fsindex, gs, gsindex, shadowgs);
        printk(KERN_DEFAULT "CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
                        es, cr0);
        printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
                        cr4);

        get_debugreg(d0, 0);
        get_debugreg(d1, 1);
        get_debugreg(d2, 2);
        printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
        get_debugreg(d3, 3);
        get_debugreg(d6, 6);
        get_debugreg(d7, 7);
        printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}

void release_thread(struct task_struct *dead_task)
{
        if (dead_task->mm) {
                if (dead_task->mm->context.size) {
                        printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
                                        dead_task->comm,
                                        dead_task->mm->context.ldt,
                                        dead_task->mm->context.size);
                        BUG();
                }
        }
}

static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
        struct user_desc ud = {
                .base_addr = addr,
                .limit = 0xfffff,
                .seg_32bit = 1,
                .limit_in_pages = 1,
                .useable = 1,
        };
        struct desc_struct *desc = t->thread.tls_array;
        desc += tls;
        fill_ldt(desc, &ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
        return get_desc_base(&t->thread.tls_array[tls]);
}
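
/*
 * Illustrative round-trip (added, not in the original file): for a
 * sub-4G base, do_arch_prctl() below uses this pair so the base lives
 * in a GDT descriptor rather than MSR_{FS,GS}_BASE.  The address is an
 * arbitrary example value; anything <= 0xffffffff behaves the same:
 *
 *	set_32bit_tls(task, FS_TLS, 0x12345000);
 *	base = read_32bit_tls(task, FS_TLS);	// base == 0x12345000
 *
 * i.e. read_32bit_tls() recovers exactly the base_addr that
 * set_32bit_tls() packed into task->thread.tls_array.
 */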

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
        unlazy_fpu(tsk);
}

int copy_thread(unsigned long clone_flags, unsigned long sp,
                unsigned long unused,
                struct task_struct *p, struct pt_regs *regs)
{
        int err;
        struct pt_regs *childregs;
        struct task_struct *me = current;

        childregs = ((struct pt_regs *)
                        (THREAD_SIZE + task_stack_page(p))) - 1;
        *childregs = *regs;

        childregs->ax = 0;
        if (user_mode(regs))
                childregs->sp = sp;
        else
                childregs->sp = (unsigned long)childregs;

        p->thread.sp = (unsigned long) childregs;
        p->thread.sp0 = (unsigned long) (childregs+1);
        p->thread.usersp = me->thread.usersp;

        set_tsk_thread_flag(p, TIF_FORK);

        p->fpu_counter = 0;
        p->thread.io_bitmap_ptr = NULL;

        savesegment(gs, p->thread.gsindex);
        p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
        savesegment(fs, p->thread.fsindex);
        p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
        savesegment(es, p->thread.es);
        savesegment(ds, p->thread.ds);

        err = -ENOMEM;
        memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

        if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
                p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
                                                  IO_BITMAP_BYTES, GFP_KERNEL);
                if (!p->thread.io_bitmap_ptr) {
                        p->thread.io_bitmap_max = 0;
                        return -ENOMEM;
                }
                set_tsk_thread_flag(p, TIF_IO_BITMAP);
        }

        /*
         * Set a new TLS for the child thread?
         */
        if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
                if (test_thread_flag(TIF_IA32))
                        err = do_set_thread_area(p, -1,
                                (struct user_desc __user *)childregs->si, 0);
                else
#endif
                        err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
                if (err)
                        goto out;
        }
        err = 0;
out:
        if (err && p->thread.io_bitmap_ptr) {
                kfree(p->thread.io_bitmap_ptr);
                p->thread.io_bitmap_max = 0;
        }

        return err;
}
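
/*
 * Note on the CLONE_SETTLS conventions above (added commentary): the
 * 64-bit clone() syscall takes the TLS pointer as its fifth argument,
 * which arrives in %r8 and is read back here from the saved user frame
 * as childregs->r8; ia32 callers instead pass a struct user_desc
 * pointer, read from childregs->si.  A hypothetical raw 64-bit caller
 * would look roughly like:
 *
 *	syscall(SYS_clone, flags | CLONE_SETTLS, child_stack,
 *		&parent_tid, &child_tid, new_fs_base);
 */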

static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
                    unsigned long new_sp,
                    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
        loadsegment(fs, 0);
        loadsegment(es, _ds);
        loadsegment(ds, _ds);
        load_gs_index(0);
        regs->ip                = new_ip;
        regs->sp                = new_sp;
        percpu_write(old_rsp, new_sp);
        regs->cs                = _cs;
        regs->ss                = _ss;
        regs->flags             = X86_EFLAGS_IF;
        /*
         * Free the old FP and other extended state
         */
        free_thread_xstate(current);
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
        start_thread_common(regs, new_ip, new_sp,
                            __USER_CS, __USER_DS, 0);
}

#ifdef CONFIG_IA32_EMULATION
void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
        start_thread_common(regs, new_ip, new_sp,
                            __USER32_CS, __USER32_DS, __USER32_DS);
}
#endif
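
/*
 * Usage note (added commentary): these are called by the binfmt loaders
 * once a new executable image is set up; e.g. fs/binfmt_elf.c ends
 * load_elf_binary() with roughly:
 *
 *	start_thread(regs, elf_entry, bprm->p);
 *
 * which points the saved user frame at the ELF entry point and the new
 * user stack, so the next return to user space starts the new program.
 */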

/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * Function graph tracer is not supported either.
 */
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
        struct thread_struct *prev = &prev_p->thread;
        struct thread_struct *next = &next_p->thread;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);
        unsigned fsindex, gsindex;
        fpu_switch_t fpu;

        fpu = switch_fpu_prepare(prev_p, next_p, cpu);

        /*
         * Reload esp0, LDT and the page table pointer:
         */
        load_sp0(tss, next);

        /*
         * Switch DS and ES.
         * This won't pick up thread selector changes, but I guess that is ok.
         */
        savesegment(es, prev->es);
        if (unlikely(next->es | prev->es))
                loadsegment(es, next->es);

        savesegment(ds, prev->ds);
        if (unlikely(next->ds | prev->ds))
                loadsegment(ds, next->ds);

        /* We must save %fs and %gs before load_TLS() because
         * %fs and %gs may be cleared by load_TLS().
         *
         * (e.g. xen_load_tls())
         */
        savesegment(fs, fsindex);
        savesegment(gs, gsindex);

        load_TLS(next, cpu);

        /*
         * Leave lazy mode, flushing any hypercalls made here.
         * This must be done before restoring TLS segments so
         * the GDT and LDT are properly updated, and must be
         * done before math_state_restore, so the TS bit is up
         * to date.
         */
        arch_end_context_switch(next_p);

        /*
         * Switch FS and GS.
         *
         * Segment register != 0 always requires a reload.  Also
         * reload when it has changed.  When prev process used 64bit
         * base always reload to avoid an information leak.
         */
        if (unlikely(fsindex | next->fsindex | prev->fs)) {
                loadsegment(fs, next->fsindex);
                /*
                 * Check if the user used a selector != 0; if yes
                 * clear 64bit base, since overloaded base is always
                 * mapped to the Null selector
                 */
                if (fsindex)
                        prev->fs = 0;
        }
        /* when next process has a 64bit base use it */
        if (next->fs)
                wrmsrl(MSR_FS_BASE, next->fs);
        prev->fsindex = fsindex;

        if (unlikely(gsindex | next->gsindex | prev->gs)) {
                load_gs_index(next->gsindex);
                if (gsindex)
                        prev->gs = 0;
        }
        if (next->gs)
                wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
        prev->gsindex = gsindex;

        switch_fpu_finish(next_p, fpu);

        /*
         * Switch the PDA and FPU contexts.
         */
        prev->usersp = percpu_read(old_rsp);
        percpu_write(old_rsp, next->usersp);
        percpu_write(current_task, next_p);

        percpu_write(kernel_stack,
                  (unsigned long)task_stack_page(next_p) +
                  THREAD_SIZE - KERNEL_STACK_OFFSET);

        /*
         * Now maybe reload the debug registers and handle I/O bitmaps
         */
        if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
                     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
                __switch_to_xtra(prev_p, next_p, tss);

        return prev_p;
}
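
/*
 * Worked example of the fs reload test above (added commentary): if
 * prev ran with a 64-bit FS base (prev->fs != 0) while fsindex and
 * next->fsindex are both 0, the expression fsindex | next->fsindex |
 * prev->fs is still non-zero, so the selector reload runs and next does
 * not silently inherit prev's FS base.  Skipping the reload is only
 * safe when all three values are zero.
 */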

void set_personality_64bit(void)
{
        /* inherit personality from parent */

        /* Make sure to be in 64bit mode */
        clear_thread_flag(TIF_IA32);

        /* Ensure the corresponding mm is not marked. */
        if (current->mm)
                current->mm->context.ia32_compat = 0;

        /* TBD: overwrites user setup. Should have two bits.
           But 64bit processes have always behaved this way,
           so it's not too bad. The main problem is just that
           32bit children are affected again. */
        current->personality &= ~READ_IMPLIES_EXEC;
}

void set_personality_ia32(void)
{
        /* inherit personality from parent */

        /* Make sure to be in 32bit mode */
        set_thread_flag(TIF_IA32);
        current->personality |= force_personality32;

        /* Mark the associated mm as containing 32-bit tasks. */
        if (current->mm)
                current->mm->context.ia32_compat = 1;

        /* Prepare the first "return" to user space */
        current_thread_info()->status |= TS_COMPAT;
}

unsigned long get_wchan(struct task_struct *p)
{
        unsigned long stack;
        u64 fp, ip;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;
        stack = (unsigned long)task_stack_page(p);
        if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
                return 0;
        fp = *(u64 *)(p->thread.sp);
        do {
                if (fp < (unsigned long)stack ||
                    fp >= (unsigned long)stack+THREAD_SIZE)
                        return 0;
                ip = *(u64 *)(fp+8);
                if (!in_sched_functions(ip))
                        return ip;
                fp = *(u64 *)fp;
        } while (count++ < 16);
        return 0;
}
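
/*
 * Usage note (added commentary): this frame-pointer walk is what backs
 * /proc/<pid>/wchan, e.g. "cat /proc/1/wchan" resolves the address
 * returned here to the symbol of the function a sleeping task is
 * blocked in.  The 16-iteration cap bounds the walk in case the stack
 * is corrupt or frame pointers form a loop.
 */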

long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
        int ret = 0;
        int doit = task == current;
        int cpu;

        switch (code) {
        case ARCH_SET_GS:
                if (addr >= TASK_SIZE_OF(task))
                        return -EPERM;
                cpu = get_cpu();
                /* handle small bases via the GDT because that's faster to
                   switch. */
                if (addr <= 0xffffffff) {
                        set_32bit_tls(task, GS_TLS, addr);
                        if (doit) {
                                load_TLS(&task->thread, cpu);
                                load_gs_index(GS_TLS_SEL);
                        }
                        task->thread.gsindex = GS_TLS_SEL;
                        task->thread.gs = 0;
                } else {
                        task->thread.gsindex = 0;
                        task->thread.gs = addr;
                        if (doit) {
                                load_gs_index(0);
                                ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
                        }
                }
                put_cpu();
                break;
        case ARCH_SET_FS:
                /* Not strictly needed for fs, but do it for symmetry
                   with gs */
                if (addr >= TASK_SIZE_OF(task))
                        return -EPERM;
                cpu = get_cpu();
                /* handle small bases via the GDT because that's faster to
                   switch. */
                if (addr <= 0xffffffff) {
                        set_32bit_tls(task, FS_TLS, addr);
                        if (doit) {
                                load_TLS(&task->thread, cpu);
                                loadsegment(fs, FS_TLS_SEL);
                        }
                        task->thread.fsindex = FS_TLS_SEL;
                        task->thread.fs = 0;
                } else {
                        task->thread.fsindex = 0;
                        task->thread.fs = addr;
                        if (doit) {
                                /* set the selector to 0 to not confuse
                                   __switch_to */
                                loadsegment(fs, 0);
                                ret = checking_wrmsrl(MSR_FS_BASE, addr);
                        }
                }
                put_cpu();
                break;
        case ARCH_GET_FS: {
                unsigned long base;
                if (task->thread.fsindex == FS_TLS_SEL)
                        base = read_32bit_tls(task, FS_TLS);
                else if (doit)
                        rdmsrl(MSR_FS_BASE, base);
                else
                        base = task->thread.fs;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }
        case ARCH_GET_GS: {
                unsigned long base;
                unsigned gsindex;
                if (task->thread.gsindex == GS_TLS_SEL)
                        base = read_32bit_tls(task, GS_TLS);
                else if (doit) {
                        savesegment(gs, gsindex);
                        if (gsindex)
                                rdmsrl(MSR_KERNEL_GS_BASE, base);
                        else
                                base = task->thread.gs;
                } else
                        base = task->thread.gs;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
        return do_arch_prctl(current, code, addr);
}
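
/*
 * Illustrative userspace sketch (added, not kernel code): the entry
 * point above is reachable roughly as follows; error handling omitted,
 * and tls_block is a hypothetical pointer set up by the caller
 * (normally the C library does this at thread creation).
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	unsigned long base;
 *	syscall(SYS_arch_prctl, ARCH_SET_FS, (unsigned long)tls_block);
 *	syscall(SYS_arch_prctl, ARCH_GET_FS, (unsigned long)&base);
 *	// base now equals (unsigned long)tls_block
 */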

unsigned long KSTK_ESP(struct task_struct *task)
{
        return (test_tsk_thread_flag(task, TIF_IA32)) ?
                        (task_pt_regs(task)->sp) : ((task)->thread.usersp);
}