x86/entry/32: Simplify and fix up the SYSENTER stack #DB/NMI fixup
[linux-2.6-block.git] arch/x86/kernel/process.c
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <asm/idle.h>
#include <asm/uaccess.h>
#include <asm/mwait.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/vm86.h>

/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
	.x86_tss = {
		.sp0 = TOP_OF_INIT_STACK,
#ifdef CONFIG_X86_32
		.ss0 = __KERNEL_DS,
		.ss1 = __KERNEL_CS,
		.io_bitmap_base = INVALID_IO_BITMAP_OFFSET,
#endif
	},
#ifdef CONFIG_X86_32
	/*
	 * Note that the .io_bitmap member must be extra-big. This is because
	 * the CPU will access an additional byte beyond the end of the IO
	 * permission bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	.io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 },
#endif
};
EXPORT_PER_CPU_SYMBOL(cpu_tss);

#ifdef CONFIG_X86_64
static DEFINE_PER_CPU(unsigned char, is_idle);
static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
	atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);
#endif

/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	memcpy(dst, src, arch_task_struct_size);
#ifdef CONFIG_VM86
	dst->thread.vm86 = NULL;
#endif

	return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;
	unsigned long *bp = t->io_bitmap_ptr;
	struct fpu *fpu = &t->fpu;

	if (bp) {
		struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());

		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
		kfree(bp);
	}

	free_vm86(t);

	fpu__drop(fpu);
}

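/*
 * Reset the current thread's state when it execs a new image: clear any
 * ptrace hardware breakpoints and the TLS array, and reinitialize the
 * FPU state.
 */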
void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));

	fpu__clear(&tsk->thread.fpu);
}

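/*
 * CR4.TSD controls whether RDTSC/RDTSCP fault in user mode.  These helpers
 * flip that bit; the per-task TIF_NOTSC flag records the setting so that
 * __switch_to_xtra() can re-apply it across context switches.
 */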
static void hard_disable_TSC(void)
{
	cr4_set_bits(X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}

static void hard_enable_TSC(void)
{
	cr4_clear_bits(X86_CR4_TSD);
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_enable_TSC();
	preempt_enable();
}

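/*
 * get_tsc_mode()/set_tsc_mode() back the PR_GET_TSC/PR_SET_TSC prctl()
 * operations: they report or change whether RDTSC traps for this task.
 */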
int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}

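/*
 * Handle the task-switch work that is common to 32-bit and 64-bit: block
 * stepping (DEBUGCTLMSR_BTF), the TIF_NOTSC setting and the I/O permission
 * bitmap in the TSS.
 */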
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss)
{
	struct thread_struct *prev, *next;

	prev = &prev_p->thread;
	next = &next_p->thread;

	if (test_tsk_thread_flag(prev_p, TIF_BLOCKSTEP) ^
	    test_tsk_thread_flag(next_p, TIF_BLOCKSTEP)) {
		unsigned long debugctl = get_debugctlmsr();

		debugctl &= ~DEBUGCTLMSR_BTF;
		if (test_tsk_thread_flag(next_p, TIF_BLOCKSTEP))
			debugctl |= DEBUGCTLMSR_BTF;

		update_debugctlmsr(debugctl);
	}

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}

	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}
	propagate_user_return_notify(prev_p, next_p);
}

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

static void (*x86_idle)(void);

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

#ifdef CONFIG_X86_64
void enter_idle(void)
{
	this_cpu_write(is_idle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}
#endif

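/*
 * Hooks invoked by the generic idle loop around entering and leaving the
 * idle state, and when an offlined CPU is finally parked.
 */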
void arch_cpu_idle_enter(void)
{
	local_touch_nmi();
	enter_idle();
}

void arch_cpu_idle_exit(void)
{
	__exit_idle();
}

void arch_cpu_idle_dead(void)
{
	play_dead();
}

/*
 * Called from the generic idle code.
 */
void arch_cpu_idle(void)
{
	x86_idle();
}

/*
 * We use this if we don't have any better idle routine..
 */
void default_idle(void)
{
	trace_cpu_idle_rcuidle(1, smp_processor_id());
	safe_halt();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

#ifdef CONFIG_XEN
bool xen_set_default_idle(void)
{
	bool ret = !!x86_idle;

	x86_idle = default_idle;

	return ret;
}
#endif
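
/*
 * Take the calling CPU out of service: mark it offline, shut down its local
 * APIC and machine-check machinery, then halt it forever.
 */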
void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();
	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));

	for (;;)
		halt();
}

bool amd_e400_c1e_detected;
EXPORT_SYMBOL(amd_e400_c1e_detected);

static cpumask_var_t amd_e400_c1e_mask;

void amd_e400_remove_cpu(int cpu)
{
	if (amd_e400_c1e_mask != NULL)
		cpumask_clear_cpu(cpu, amd_e400_c1e_mask);
}

/*
 * AMD Erratum 400 aware idle routine. We check for C1E active in the interrupt
 * pending message MSR. If we detect C1E, then we handle it the same
 * way as C3 power states (local apic timer and TSC stop)
 */
static void amd_e400_idle(void)
{
	if (!amd_e400_c1e_detected) {
		u32 lo, hi;

		rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);

		if (lo & K8_INTP_C1E_ACTIVE_MASK) {
			amd_e400_c1e_detected = true;
			if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
				mark_tsc_unstable("TSC halt in AMD C1E");
			pr_info("System has AMD C1E enabled\n");
		}
	}

	if (amd_e400_c1e_detected) {
		int cpu = smp_processor_id();

		if (!cpumask_test_cpu(cpu, amd_e400_c1e_mask)) {
			cpumask_set_cpu(cpu, amd_e400_c1e_mask);
			/* Force broadcast so ACPI can not interfere. */
			tick_broadcast_force();
			pr_info("Switch to broadcast mode on CPU%d\n", cpu);
		}
		tick_broadcast_enter();

		default_idle();

		/*
		 * The switch back from broadcast mode needs to be
		 * called with interrupts disabled.
		 */
		local_irq_disable();
		tick_broadcast_exit();
		local_irq_enable();
	} else
		default_idle();
}

/*
 * Intel Core2 and older machines prefer MWAIT over HALT for C1.
 * We can't rely on cpuidle installing MWAIT, because it will not load
 * on systems that support only C1 -- so the boot default must be MWAIT.
 *
 * Some AMD machines are the opposite, they depend on using HALT.
 *
 * So for default C1, which is used during boot until cpuidle loads,
 * use MWAIT-C1 on Intel HW that has it, else use HALT.
 */
static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
{
	if (c->x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (!cpu_has(c, X86_FEATURE_MWAIT))
		return 0;

	return 1;
}

/*
 * MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
 * with interrupts enabled and no flags, which is backwards compatible with the
 * original MWAIT implementation.
 */
static void mwait_idle(void)
{
	if (!current_set_polling_and_test()) {
		trace_cpu_idle_rcuidle(1, smp_processor_id());
		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
			smp_mb(); /* quirk */
			clflush((void *)&current_thread_info()->flags);
			smp_mb(); /* quirk */
		}

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			local_irq_enable();
		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	} else {
		local_irq_enable();
	}
	__current_clr_polling();
}

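/*
 * Pick the boot-time default idle routine: the AMD E400-aware routine,
 * MWAIT-C1 on Intel hardware that has it, or plain HLT, unless "idle=poll"
 * or an earlier caller already installed one.
 */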
void select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif
	if (x86_idle || boot_option_idle_override == IDLE_POLL)
		return;

	if (cpu_has_bug(c, X86_BUG_AMD_APIC_C1E)) {
		/* E400: APIC timer interrupt does not wake up CPU from C1e */
		pr_info("using AMD E400 aware idle routine\n");
		x86_idle = amd_e400_idle;
	} else if (prefer_mwait_c1_over_halt(c)) {
		pr_info("using mwait in idle threads\n");
		x86_idle = mwait_idle;
	} else
		x86_idle = default_idle;
}

void __init init_amd_e400_c1e_mask(void)
{
	/* If we're using amd_e400_idle, we need to allocate amd_e400_c1e_mask. */
	if (x86_idle == amd_e400_idle)
		zalloc_cpumask_var(&amd_e400_c1e_mask, GFP_KERNEL);
}

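/*
 * Parse the "idle=" early boot parameter: "poll", "halt" or "nomwait".
 */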
static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads\n");
		boot_option_idle_override = IDLE_POLL;
		cpu_idle_poll_ctrl(true);
	} else if (!strcmp(str, "halt")) {
		/*
		 * When the boot option of idle=halt is added, halt is
		 * forced to be used for CPU idle. In such case CPU C2/C3
		 * won't be used again.
		 * To continue to load the CPU idle driver, don't touch
		 * the boot_option_idle_override.
		 */
		x86_idle = default_idle;
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * If the boot option of "idle=nomwait" is added,
		 * it means that mwait will be disabled for CPU C2/C3
		 * states. In such case it won't touch the variable
		 * of boot_option_idle_override.
		 */
		boot_option_idle_override = IDLE_NOMWAIT;
	} else
		return -1;

	return 0;
}
early_param("idle", idle_setup);

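/*
 * Randomization helpers for the initial stack pointer and the heap (brk)
 * of new processes (ASLR).
 */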
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;
	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}

/*
 * Called from fs/proc with a reference on @p to find the function
 * which called into schedule(). This needs to be done carefully
 * because the task might wake up and we might look at a stack
 * changing under us.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long start, bottom, top, sp, fp, ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	start = (unsigned long)task_stack_page(p);
	if (!start)
		return 0;

	/*
	 * Layout of the stack page:
	 *
	 * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
	 * PADDING
	 * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
	 * stack
	 * ----------- bottom = start + sizeof(thread_info)
	 * thread_info
	 * ----------- start
	 *
	 * The task's stack pointer points at the location where the
	 * framepointer is stored. The data on the stack is:
	 * ... IP FP ... IP FP
	 *
	 * We need to read FP and IP, so we need to adjust the upper
	 * bound by another unsigned long.
	 */
	top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
	top -= 2 * sizeof(unsigned long);
	bottom = start + sizeof(struct thread_info);

	sp = READ_ONCE(p->thread.sp);
	if (sp < bottom || sp > top)
		return 0;

	fp = READ_ONCE_NOCHECK(*(unsigned long *)sp);
	do {
		if (fp < bottom || fp > top)
			return 0;
		ip = READ_ONCE_NOCHECK(*(unsigned long *)(fp + sizeof(unsigned long)));
		if (!in_sched_functions(ip))
			return ip;
		fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
	} while (count++ < 16 && p->state != TASK_RUNNING);
	return 0;
}