#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/cpuidle.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <asm/syscalls.h>
#include <linux/uaccess.h>
#include <asm/mwait.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/vm86.h>
#include <asm/switch_to.h>
#include <asm/desc.h>
#include <asm/prctl.h>
/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
	.x86_tss = {
		.sp0 = TOP_OF_INIT_STACK,
#ifdef CONFIG_X86_32
		.ss0 = __KERNEL_DS,
		.ss1 = __KERNEL_CS,
		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET,
#endif
	 },
#ifdef CONFIG_X86_32
	/*
	 * Note that the .io_bitmap member must be extra-big. This is because
	 * the CPU will access an additional byte beyond the end of the IO
	 * permission bitmap. The extra byte must be all 1 bits, and must
	 * be within the limit.
	 */
	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },
#endif
#ifdef CONFIG_X86_32
	.SYSENTER_stack_canary	= STACK_END_MAGIC,
#endif
};
EXPORT_PER_CPU_SYMBOL(cpu_tss);
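
/*
 * Tracks whether the TSS segment limit currently excludes the I/O bitmap,
 * in which case a task that uses an I/O bitmap forces a limit refresh
 * (see refresh_tss_limit() in <asm/desc.h>).
 */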
DEFINE_PER_CPU(bool, __tss_limit_invalid);
EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	memcpy(dst, src, arch_task_struct_size);
#ifdef CONFIG_VM86
	dst->thread.vm86 = NULL;
#endif

	return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
}
/*
 * Free current thread data structures etc.
 */
void exit_thread(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;
	unsigned long *bp = t->io_bitmap_ptr;
	struct fpu *fpu = &t->fpu;

	if (bp) {
		struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());

		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
		kfree(bp);
	}

	free_vm86(t);

	fpu__drop(fpu);
}
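
/*
 * Reset the thread state on exec: drop inherited hardware breakpoints,
 * TLS entries and FPU state so the new program starts clean.
 */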
void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));

	fpu__clear(&tsk->thread.fpu);
}
void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		cr4_set_bits(X86_CR4_TSD);
	preempt_enable();
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		cr4_clear_bits(X86_CR4_TSD);
	preempt_enable();
}
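
/*
 * prctl(2) accessors for the per-task TSC mode. Illustrative userspace
 * usage (a sketch, not part of this file):
 *
 *	int mode;
 *	prctl(PR_GET_TSC, &mode);		// PR_TSC_ENABLE or PR_TSC_SIGSEGV
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV);	// rdtsc now raises SIGSEGV
 */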
int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}
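
/*
 * Software shadow of MSR_MISC_FEATURES_ENABLES: keeping a per-CPU copy
 * avoids a costly RDMSR when only a single bit needs to be flipped.
 */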
DEFINE_PER_CPU(u64, msr_misc_features_shadow);

static void set_cpuid_faulting(bool on)
{
	u64 msrval;

	msrval = this_cpu_read(msr_misc_features_shadow);
	msrval &= ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
	msrval |= (on << MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT);
	this_cpu_write(msr_misc_features_shadow, msrval);
	wrmsrl(MSR_MISC_FEATURES_ENABLES, msrval);
}
static void disable_cpuid(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOCPUID)) {
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context.
		 */
		set_cpuid_faulting(true);
	}
	preempt_enable();
}

static void enable_cpuid(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOCPUID)) {
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context.
		 */
		set_cpuid_faulting(false);
	}
	preempt_enable();
}
static int get_cpuid_mode(void)
{
	return !test_thread_flag(TIF_NOCPUID);
}

static int set_cpuid_mode(struct task_struct *task, unsigned long cpuid_enabled)
{
	if (!static_cpu_has(X86_FEATURE_CPUID_FAULT))
		return -ENODEV;

	if (cpuid_enabled)
		enable_cpuid();
	else
		disable_cpuid();

	return 0;
}
/*
 * Called immediately after a successful exec.
 */
void arch_setup_new_exec(void)
{
	/* If cpuid was previously disabled for this task, re-enable it. */
	if (test_thread_flag(TIF_NOCPUID))
		enable_cpuid();
}
static inline void switch_to_bitmap(struct tss_struct *tss,
				    struct thread_struct *prev,
				    struct thread_struct *next,
				    unsigned long tifp, unsigned long tifn)
{
	if (tifn & _TIF_IO_BITMAP) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
		/*
		 * Make sure that the TSS limit is correct for the CPU
		 * to notice the IO bitmap.
		 */
		refresh_tss_limit();
	} else if (tifp & _TIF_IO_BITMAP) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}
}
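
/*
 * Handle the rarely exercised TIF flags on a context switch: the I/O
 * bitmap, user-return notifiers, block-step (BTF), and the TSC/CPUID
 * intercepts. Called from __switch_to() when either task has one of
 * these work flags set.
 */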
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss)
{
	struct thread_struct *prev, *next;
	unsigned long tifp, tifn;

	prev = &prev_p->thread;
	next = &next_p->thread;

	tifn = READ_ONCE(task_thread_info(next_p)->flags);
	tifp = READ_ONCE(task_thread_info(prev_p)->flags);
	switch_to_bitmap(tss, prev, next, tifp, tifn);

	propagate_user_return_notify(prev_p, next_p);

	if ((tifp & _TIF_BLOCKSTEP || tifn & _TIF_BLOCKSTEP) &&
	    arch_has_block_step()) {
		unsigned long debugctl, msk;

		rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
		debugctl &= ~DEBUGCTLMSR_BTF;
		msk = tifn & _TIF_BLOCKSTEP;
		debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT;
		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	}

	if ((tifp ^ tifn) & _TIF_NOTSC)
		cr4_toggle_bits(X86_CR4_TSD);

	if ((tifp ^ tifn) & _TIF_NOCPUID)
		set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
}
/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);
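
/* The active idle routine, selected once at boot by select_idle_routine(). */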
static void (*x86_idle)(void);

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

void arch_cpu_idle_enter(void)
{
	tsc_verify_tsc_adjust(false);
	local_touch_nmi();
}

void arch_cpu_idle_dead(void)
{
	play_dead();
}

/*
 * Called from the generic idle code.
 */
void arch_cpu_idle(void)
{
	x86_idle();
}
/*
 * We use this if we don't have any better idle routine.
 */
void __cpuidle default_idle(void)
{
	trace_cpu_idle_rcuidle(1, smp_processor_id());
	safe_halt();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

#ifdef CONFIG_XEN
bool xen_set_default_idle(void)
{
	bool ret = !!x86_idle;

	x86_idle = default_idle;

	return ret;
}
#endif
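
/*
 * Park the calling CPU permanently. Used via IPI by the reboot, halt
 * and stop-other-CPUs paths to quiesce all other processors.
 */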
void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();
	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));

	for (;;) {
		/*
		 * Use wbinvd followed by hlt to stop the processor. This
		 * provides support for kexec on a processor that supports
		 * SME. With kexec, going from SME inactive to SME active
		 * requires clearing cache entries so that addresses without
		 * the encryption bit set don't corrupt the same physical
		 * address that has the encryption bit set when caches are
		 * flushed. To achieve this a wbinvd is performed followed by
		 * a hlt. Even if the processor is not in the kexec/SME
		 * scenario this only adds a wbinvd to a halting processor.
		 */
		asm volatile("wbinvd; hlt" : : : "memory");
	}
}
/*
 * AMD Erratum 400 aware idle routine. We handle it the same way as C3 power
 * states (local apic timer and TSC stop).
 */
static void amd_e400_idle(void)
{
	/*
	 * We cannot use static_cpu_has_bug() here because X86_BUG_AMD_APIC_C1E
	 * gets set after static_cpu_has() places have been converted via
	 * alternatives.
	 */
	if (!boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
		default_idle();
		return;
	}

	tick_broadcast_enter();

	default_idle();

	/*
	 * The switch back from broadcast mode needs to be called with
	 * interrupts disabled.
	 */
	local_irq_disable();
	tick_broadcast_exit();
	local_irq_enable();
}
/*
 * Intel Core2 and older machines prefer MWAIT over HALT for C1.
 * We can't rely on cpuidle installing MWAIT, because it will not load
 * on systems that support only C1 -- so the boot default must be MWAIT.
 *
 * Some AMD machines are the opposite, they depend on using HALT.
 *
 * So for default C1, which is used during boot until cpuidle loads,
 * use MWAIT-C1 on Intel HW that has it, else use HALT.
 */
static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
{
	if (c->x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (!cpu_has(c, X86_FEATURE_MWAIT) || static_cpu_has_bug(X86_BUG_MONITOR))
		return 0;

	return 1;
}
/*
 * MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
 * with interrupts enabled and no flags, which is backwards compatible with the
 * original MWAIT implementation.
 */
static __cpuidle void mwait_idle(void)
{
	if (!current_set_polling_and_test()) {
		trace_cpu_idle_rcuidle(1, smp_processor_id());
		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
			mb(); /* quirk */
			clflush((void *)&current_thread_info()->flags);
			mb(); /* quirk */
		}

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			local_irq_enable();
		trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	} else {
		local_irq_enable();
	}
	__current_clr_polling();
}
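
/*
 * Pick the boot-time default idle routine. cpuidle may install a richer
 * governor later; this only decides what runs until (or instead of) it.
 */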
void select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif
	if (x86_idle || boot_option_idle_override == IDLE_POLL)
		return;

	if (boot_cpu_has_bug(X86_BUG_AMD_E400)) {
		pr_info("using AMD E400 aware idle routine\n");
		x86_idle = amd_e400_idle;
	} else if (prefer_mwait_c1_over_halt(c)) {
		pr_info("using mwait in idle threads\n");
		x86_idle = mwait_idle;
	} else
		x86_idle = default_idle;
}
void amd_e400_c1e_apic_setup(void)
{
	if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
		pr_info("Switch to broadcast mode on CPU%d\n", smp_processor_id());
		local_irq_disable();
		tick_broadcast_force();
		local_irq_enable();
	}
}
void __init arch_post_acpi_subsys_init(void)
{
	u32 lo, hi;

	if (!boot_cpu_has_bug(X86_BUG_AMD_E400))
		return;

	/*
	 * AMD E400 detection needs to happen after ACPI has been enabled. If
	 * the machine is affected K8_INTP_C1E_ACTIVE_MASK bits are set in
	 * MSR_K8_INT_PENDING_MSG.
	 */
	rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
	if (!(lo & K8_INTP_C1E_ACTIVE_MASK))
		return;

	boot_cpu_set_bug(X86_BUG_AMD_APIC_C1E);

	if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
		mark_tsc_unstable("TSC halt in AMD C1E");
	pr_info("System has AMD C1E enabled\n");
}
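
/* Parse the "idle=" kernel command line parameter. */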
static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads\n");
		boot_option_idle_override = IDLE_POLL;
		cpu_idle_poll_ctrl(true);
	} else if (!strcmp(str, "halt")) {
		/*
		 * When the idle=halt boot option is given, halt is
		 * forced to be used for CPU idle. In that case the
		 * CPU C2/C3 states won't be used again.
		 * To continue to load the CPU idle driver, don't touch
		 * the boot_option_idle_override.
		 */
		x86_idle = default_idle;
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * When the idle=nomwait boot option is given, mwait is
		 * disabled for the CPU C2/C3 states. In that case it
		 * won't touch the boot_option_idle_override variable.
		 */
		boot_option_idle_override = IDLE_NOMWAIT;
	} else
		return -1;

	return 0;
}
early_param("idle", idle_setup);
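
/*
 * Randomize the initial user stack pointer by up to 8 kB while keeping
 * 16-byte alignment, unless randomization is disabled for this task.
 */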
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	return randomize_page(mm->brk, 0x02000000);
}
/*
 * Called from fs/proc with a reference on @p to find the function
 * which called into schedule(). This needs to be done carefully
 * because the task might wake up and we might look at a stack
 * changing under us.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long start, bottom, top, sp, fp, ip, ret = 0;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	if (!try_get_task_stack(p))
		return 0;

	start = (unsigned long)task_stack_page(p);
	if (!start)
		goto out;

	/*
	 * Layout of the stack page:
	 *
	 * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
	 * PADDING
	 * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
	 * stack
	 * ----------- bottom = start
	 *
	 * The task's stack pointer points at the location where the
	 * framepointer is stored. The data on the stack is:
	 * ... IP FP ... IP FP
	 *
	 * We need to read FP and IP, so we need to adjust the upper
	 * bound by another unsigned long.
	 */
	top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
	top -= 2 * sizeof(unsigned long);
	bottom = start;

	sp = READ_ONCE(p->thread.sp);
	if (sp < bottom || sp > top)
		goto out;

	fp = READ_ONCE_NOCHECK(((struct inactive_task_frame *)sp)->bp);
	do {
		if (fp < bottom || fp > top)
			goto out;
		ip = READ_ONCE_NOCHECK(*(unsigned long *)(fp + sizeof(unsigned long)));
		if (!in_sched_functions(ip)) {
			ret = ip;
			goto out;
		}
		fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
	} while (count++ < 16 && p->state != TASK_RUNNING);

out:
	put_task_stack(p);
	return ret;
}
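
/*
 * arch_prctl(2) options common to 32 and 64 bit. Illustrative userspace
 * usage (a sketch, not part of this file):
 *
 *	syscall(SYS_arch_prctl, ARCH_SET_CPUID, 0);	// CPUID now faults
 */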
long do_arch_prctl_common(struct task_struct *task, int option,
			  unsigned long cpuid_enabled)
{
	switch (option) {
	case ARCH_GET_CPUID:
		return get_cpuid_mode();
	case ARCH_SET_CPUID:
		return set_cpuid_mode(task, cpuid_enabled);
	}

	return -EINVAL;
}