/*
 * arch/x86/kernel/process.c (from linux-2.6-block.git)
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/clockchips.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/system.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <asm/idle.h>
#include <asm/uaccess.h>
#include <asm/i387.h>
#include <asm/debugreg.h>

unsigned long idle_halt;
EXPORT_SYMBOL(idle_halt);
unsigned long idle_nomwait;
EXPORT_SYMBOL(idle_nomwait);

struct kmem_cache *task_xstate_cachep;
EXPORT_SYMBOL_GPL(task_xstate_cachep);

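/*
 * arch_dup_task_struct() below first makes a byte-for-byte copy of the
 * parent's task_struct and then, if the parent has an FPU state buffer,
 * allocates and fills a separate xstate buffer for the child so the two
 * tasks never alias FPU state.
 */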
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	int ret;

	*dst = *src;
	if (fpu_allocated(&src->thread.fpu)) {
		memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu));
		ret = fpu_alloc(&dst->thread.fpu);
		if (ret)
			return ret;
		fpu_copy(&dst->thread.fpu, &src->thread.fpu);
	}
	return 0;
}

void free_thread_xstate(struct task_struct *tsk)
{
	fpu_free(&tsk->thread.fpu);
}

void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	free_pages((unsigned long)ti, get_order(THREAD_SIZE));
}

void arch_task_cache_init(void)
{
	task_xstate_cachep =
		kmem_cache_create("task_xstate", xstate_size,
				  __alignof__(union thread_xstate),
				  SLAB_PANIC | SLAB_NOTRACK, NULL);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;
	unsigned long *bp = t->io_bitmap_ptr;

	if (bp) {
		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
		kfree(bp);
	}
}

void show_regs(struct pt_regs *regs)
{
	show_registers(regs);
	show_trace(NULL, regs, (unsigned long *)kernel_stack_pointer(regs),
		   regs->bp);
}

void show_regs_common(void)
{
	const char *board, *product;

	board = dmi_get_system_info(DMI_BOARD_NAME);
	if (!board)
		board = "";
	product = dmi_get_system_info(DMI_PRODUCT_NAME);
	if (!product)
		product = "";

	printk(KERN_CONT "\n");
	printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s/%s\n",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version, board, product);
}

void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	/*
	 * Forget coprocessor state..
	 */
	tsk->fpu_counter = 0;
	clear_fpu(tsk);
	clear_used_math();
}

static void hard_disable_TSC(void)
{
	write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}

static void hard_enable_TSC(void)
{
	write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_enable_TSC();
	preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}

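/*
 * get_tsc_mode()/set_tsc_mode() above back the PR_GET_TSC/PR_SET_TSC
 * prctl() operations. A minimal user-space sketch (illustrative only):
 *
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV);	-- rdtsc now raises SIGSEGV
 *	...
 *	prctl(PR_SET_TSC, PR_TSC_ENABLE);	-- rdtsc works again
 */
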
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss)
{
	struct thread_struct *prev, *next;

	prev = &prev_p->thread;
	next = &next_p->thread;

	if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
	    test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl &= ~DEBUGCTLMSR_BTF;
		if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
			debugctl |= DEBUGCTLMSR_BTF;

		update_debugctlmsr(debugctl);
	}

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}

	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}
	propagate_user_return_notify(prev_p, next_p);
}

int sys_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
int sys_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
		       NULL, NULL);
}

long
sys_clone(unsigned long clone_flags, unsigned long newsp,
	  void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
	if (!newsp)
		newsp = regs->sp;
	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}

/*
 * This gets run with %si containing the
 * function to call, and %di containing
 * the "args".
 */
extern void kernel_thread_helper(void);

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	regs.si = (unsigned long) fn;
	regs.di = (unsigned long) arg;

#ifdef CONFIG_X86_32
	regs.ds = __USER_DS;
	regs.es = __USER_DS;
	regs.fs = __KERNEL_PERCPU;
	regs.gs = __KERNEL_STACK_CANARY;
#else
	regs.ss = __KERNEL_DS;
#endif

	regs.orig_ax = -1;
	regs.ip = (unsigned long) kernel_thread_helper;
	regs.cs = __KERNEL_CS | get_kernel_rpl();
	regs.flags = X86_EFLAGS_IF | 0x2;

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);

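/*
 * Illustrative call (my_worker_fn is a hypothetical thread function):
 *
 *	kernel_thread(my_worker_fn, NULL, CLONE_FS | CLONE_FILES | SIGCHLD);
 *
 * The child starts in kernel_thread_helper, which (per the comment above)
 * receives fn in %si and arg in %di and calls fn(arg).
 */
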
/*
 * sys_execve() executes a new program.
 */
long sys_execve(const char __user *name,
		const char __user *const __user *argv,
		const char __user *const __user *envp, struct pt_regs *regs)
{
	long error;
	char *filename;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;
	error = do_execve(filename, argv, envp, regs);

#ifdef CONFIG_X86_32
	if (error == 0) {
		/* Make sure we don't return using sysenter.. */
		set_thread_flag(TIF_IRET);
	}
#endif

	putname(filename);
	return error;
}

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);

#ifdef CONFIG_X86_32
/*
 * This halt magic was a workaround for ancient floppy DMA
 * wreckage. It should be safe to remove.
 */
static int hlt_counter;
void disable_hlt(void)
{
	hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
	hlt_counter--;
}
EXPORT_SYMBOL(enable_hlt);

static inline int hlt_use_halt(void)
{
	return (!hlt_counter && boot_cpu_data.hlt_works_ok);
}
#else
static inline int hlt_use_halt(void)
{
	return 1;
}
#endif

/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
	if (hlt_use_halt()) {
		trace_power_start(POWER_CSTATE, 1, smp_processor_id());
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();

		if (!need_resched())
			safe_halt();	/* enables interrupts racelessly */
		else
			local_irq_enable();
		current_thread_info()->status |= TS_POLLING;
	} else {
		local_irq_enable();
		/* loop is done by the caller */
		cpu_relax();
	}
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();

	for (;;) {
		if (hlt_works(smp_processor_id()))
			halt();
	}
}

static void do_nothing(void *unused)
{
}

/*
 * cpu_idle_wait - Used to ensure that all the CPUs discard the old value of
 * pm_idle and start using the new one. Required while changing the pm_idle
 * handler on SMP systems.
 *
 * Caller must have changed pm_idle to the new value before the call. The old
 * pm_idle value will not be used by any CPU after this function returns.
 */
void cpu_idle_wait(void)
{
	smp_mb();
	/* kick all the CPUs so that they exit out of pm_idle */
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);

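/*
 * Sketch of the call sequence the comment above prescribes (new_idle_fn
 * is hypothetical):
 *
 *	pm_idle = new_idle_fn;	-- publish the new handler first
 *	cpu_idle_wait();	-- no CPU uses the old handler afterwards
 */
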
/*
 * This uses the new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate the IPI otherwise needed to trigger a need_resched
 * check. We execute MONITOR against need_resched and enter an optimized
 * wait state through MWAIT. Whenever someone changes need_resched, we are
 * woken up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
{
	trace_power_start(POWER_CSTATE, (ax>>4)+1, smp_processor_id());
	if (!need_resched()) {
		if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
			clflush((void *)&current_thread_info()->flags);

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__mwait(ax, cx);
	}
}

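/*
 * The upper nibble of the hint in ax selects the target C-state (hint
 * 0x00 -> C1, 0x10 -> C2, ...), which is why the tracepoint above logs
 * (ax >> 4) + 1.
 */
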
/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
	if (!need_resched()) {
		trace_power_start(POWER_CSTATE, 1, smp_processor_id());
		if (cpu_has(&current_cpu_data, X86_FEATURE_CLFLUSH_MONITOR))
			clflush((void *)&current_thread_info()->flags);

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			local_irq_enable();
	} else
		local_irq_enable();
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
	trace_power_start(POWER_CSTATE, 0, smp_processor_id());
	local_irq_enable();
	while (!need_resched())
		cpu_relax();
	trace_power_end(0);
}

/*
 * mwait selection logic:
 *
 * It depends on the CPU. For AMD CPUs that support MWAIT this is
 * wrong. Family 0x10 and 0x11 CPUs will enter C1 on HLT. Powersavings
 * then depend on a clock divisor and current Pstate of the core. If
 * all cores of a processor are in halt state (C1) the processor can
 * enter the C1E (C1 enhanced) state. If mwait is used this will never
 * happen.
 *
 * idle=mwait overrides this decision and forces the usage of mwait.
 */
static int __cpuinitdata force_mwait;

#define MWAIT_INFO			0x05
#define MWAIT_ECX_EXTENDED_INFO		0x01
#define MWAIT_EDX_C1			0xf0

static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	if (force_mwait)
		return 1;

	if (c->cpuid_level < MWAIT_INFO)
		return 0;

	cpuid(MWAIT_INFO, &eax, &ebx, &ecx, &edx);
	/* Check whether EDX has extended info about MWAIT */
	if (!(ecx & MWAIT_ECX_EXTENDED_INFO))
		return 1;

	/*
	 * edx enumerates MONITOR/MWAIT extensions. Check whether
	 * C1 supports MWAIT
	 */
	return (edx & MWAIT_EDX_C1);
}

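/*
 * Worked example: a CPU whose CPUID leaf 5 returns ECX = 0x3 and
 * EDX = 0x1120 has the extended-info bit set, and edx & MWAIT_EDX_C1
 * is 0x20 (two C1 sub-states enumerated), so mwait_usable() is true.
 */
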
bool c1e_detected;
EXPORT_SYMBOL(c1e_detected);

static cpumask_var_t c1e_mask;

void c1e_remove_cpu(int cpu)
{
	if (c1e_mask != NULL)
		cpumask_clear_cpu(cpu, c1e_mask);
}

/*
 * C1E aware idle routine. We check for C1E active in the interrupt
 * pending message MSR. If we detect C1E, then we handle it the same
 * way as C3 power states (local apic timer and TSC stop)
 */
static void c1e_idle(void)
{
	if (need_resched())
		return;

	if (!c1e_detected) {
		u32 lo, hi;

		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);

		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
			c1e_detected = true;
			if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
				mark_tsc_unstable("TSC halt in AMD C1E");
			printk(KERN_INFO "System has AMD C1E enabled\n");
		}
	}

	if (c1e_detected) {
		int cpu = smp_processor_id();

		if (!cpumask_test_cpu(cpu, c1e_mask)) {
			cpumask_set_cpu(cpu, c1e_mask);
			/*
			 * Force broadcast so ACPI can not interfere.
			 */
			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
					   &cpu);
			printk(KERN_INFO "Switch to broadcast mode on CPU%d\n",
			       cpu);
		}
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

		default_idle();

		/*
		 * The switch back from broadcast mode needs to be
		 * called with interrupts disabled.
		 */
		local_irq_disable();
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
		local_irq_enable();
	} else
		default_idle();
}

void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (pm_idle == poll_idle && smp_num_siblings > 1) {
		printk_once(KERN_WARNING "WARNING: polling idle and HT enabled,"
			    " performance may degrade.\n");
	}
#endif
	if (pm_idle)
		return;

	if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
		/*
		 * One CPU supports mwait => All CPUs support mwait
		 */
		printk(KERN_INFO "using mwait in idle threads.\n");
		pm_idle = mwait_idle;
	} else if (cpu_has_amd_erratum(amd_erratum_400)) {
		/* E400: APIC timer interrupt does not wake up CPU from C1e */
		printk(KERN_INFO "using C1E aware idle routine\n");
		pm_idle = c1e_idle;
	} else
		pm_idle = default_idle;
}

void __init init_c1e_mask(void)
{
	/* If we're using c1e_idle, we need to allocate c1e_mask. */
	if (pm_idle == c1e_idle)
		zalloc_cpumask_var(&c1e_mask, GFP_KERNEL);
}

static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		printk("using polling idle threads.\n");
		pm_idle = poll_idle;
	} else if (!strcmp(str, "mwait"))
		force_mwait = 1;
	else if (!strcmp(str, "halt")) {
		/*
		 * When the boot option idle=halt is given, halt is
		 * forced for CPU idle and the C2/C3 states won't be
		 * used again.
		 * To continue to load the CPU idle driver, don't touch
		 * boot_option_idle_override.
		 */
		pm_idle = default_idle;
		idle_halt = 1;
		return 0;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * If the boot option "idle=nomwait" is given, mwait
		 * will be disabled for the CPU C2/C3 states. In that
		 * case boot_option_idle_override is left untouched.
		 */
		idle_nomwait = 1;
		return 0;
	} else
		return -1;

	boot_option_idle_override = 1;
	return 0;
}
early_param("idle", idle_setup);

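/*
 * Recap of the idle= options handled above: "poll" busy-waits in
 * poll_idle, "mwait" forces MONITOR/MWAIT selection, "halt" pins
 * default_idle (HLT), and "nomwait" disables mwait for C2/C3 states.
 */
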
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

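/*
 * Example: with randomization enabled, an incoming sp of 0x7fff00001000
 * may be lowered by up to 8191 bytes and is then rounded down to a
 * 16-byte boundary by the final "& ~0xf".
 */
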
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;
	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}