// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/prctl.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/random.h>
#include <linux/user-return-notifier.h>
#include <linux/dmi.h>
#include <linux/utsname.h>
#include <linux/stackprotector.h>
#include <linux/cpuidle.h>
#include <linux/acpi.h>
#include <linux/elf-randomize.h>
#include <trace/events/power.h>
#include <linux/hw_breakpoint.h>
#include <asm/cpu.h>
#include <asm/apic.h>
#include <linux/uaccess.h>
#include <asm/mwait.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/nmi.h>
#include <asm/tlbflush.h>
#include <asm/mce.h>
#include <asm/vm86.h>
#include <asm/switch_to.h>
#include <asm/desc.h>
#include <asm/prctl.h>
#include <asm/spec-ctrl.h>
#include <asm/io_bitmap.h>
#include <asm/proto.h>
#include <asm/frame.h>

#include "process.h"

/*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
 * so they are allowed to end up in the .data..cacheline_aligned
 * section. Since TSS's are completely CPU-local, we want them
 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
 */
__visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
	.x86_tss = {
		/*
		 * .sp0 is only used when entering ring 0 from a lower
		 * privilege level. Since the init task never runs anything
		 * but ring 0 code, there is no need for a valid value here.
		 * Poison it.
		 */
		.sp0 = (1UL << (BITS_PER_LONG-1)) + 1,
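		/*
		 * Note: on 64-bit, (1UL << (BITS_PER_LONG-1)) + 1 is a
		 * noncanonical and misaligned address, so any stray use of
		 * init's .sp0 faults immediately instead of silently
		 * corrupting memory.
		 */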

#ifdef CONFIG_X86_32
		.sp1 = TOP_OF_INIT_STACK,

		.ss0 = __KERNEL_DS,
		.ss1 = __KERNEL_CS,
#endif

		.io_bitmap_base	= IO_BITMAP_OFFSET_INVALID,
	 },
};
EXPORT_PER_CPU_SYMBOL(cpu_tss_rw);

DEFINE_PER_CPU(bool, __tss_limit_invalid);
EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
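
/*
 * Note: __tss_limit_invalid appears to track whether the TSS segment limit
 * was lowered behind our back (e.g. by a VMX VM-exit restoring the
 * architectural minimum of 0x67); refresh_tss_limit() consults it before
 * the I/O bitmap is relied upon. See also the comment in
 * native_tss_update_io_bitmap() below.
 */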

/*
 * this gets called so that we can store lazy state into memory and copy the
 * current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	memcpy(dst, src, arch_task_struct_size);

#ifdef CONFIG_VM86
	dst->thread.vm86 = NULL;
#endif

	return fpu__copy(dst, src);
}

/*
 * Free thread data structures etc..
 */
void exit_thread(struct task_struct *tsk)
{
	struct thread_struct *t = &tsk->thread;
	struct fpu *fpu = &t->fpu;

	if (test_thread_flag(TIF_IO_BITMAP))
		io_bitmap_exit(tsk);

	free_vm86(t);

	fpu__drop(fpu);
}

static int set_new_tls(struct task_struct *p, unsigned long tls)
{
	struct user_desc __user *utls = (struct user_desc __user *)tls;

	if (in_ia32_syscall())
		return do_set_thread_area(p, -1, utls, 0);
	else
		return do_set_thread_area_64(p, ARCH_SET_FS, tls);
}
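
/*
 * Note: the two setters differ because the ABIs differ: a 32-bit (ia32)
 * clone() passes tls as a pointer to a struct user_desc describing a GDT
 * entry, while the native 64-bit ABI passes the new FS base address
 * directly.
 */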

int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
		struct task_struct *p, unsigned long tls)
{
	struct inactive_task_frame *frame;
	struct fork_frame *fork_frame;
	struct pt_regs *childregs;
	int ret = 0;

	childregs = task_pt_regs(p);
	fork_frame = container_of(childregs, struct fork_frame, regs);
	frame = &fork_frame->frame;

	frame->bp = encode_frame_pointer(childregs);
	frame->ret_addr = (unsigned long) ret_from_fork;
	p->thread.sp = (unsigned long) fork_frame;
	p->thread.io_bitmap = NULL;
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

#ifdef CONFIG_X86_64
	current_save_fsgs();
	p->thread.fsindex = current->thread.fsindex;
	p->thread.fsbase = current->thread.fsbase;
	p->thread.gsindex = current->thread.gsindex;
	p->thread.gsbase = current->thread.gsbase;

	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);
#else
	p->thread.sp0 = (unsigned long) (childregs + 1);
	/*
	 * Clear all status flags including IF and set fixed bit. 64bit
	 * does not have this initialization as the frame does not contain
	 * flags. The flags consistency (especially vs. AC) is there
	 * ensured via objtool, which lacks 32bit support.
	 */
	frame->flags = X86_EFLAGS_FIXED;
#endif

	/* Kernel thread ? */
	if (unlikely(p->flags & PF_KTHREAD)) {
		memset(childregs, 0, sizeof(struct pt_regs));
		kthread_frame_init(frame, sp, arg);
		return 0;
	}

	frame->bx = 0;
	*childregs = *current_pt_regs();
	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;

#ifdef CONFIG_X86_32
	task_user_gs(p) = get_user_gs(current_pt_regs());
#endif

	if (unlikely(p->flags & PF_IO_WORKER)) {
		/*
		 * An IO thread is a user space thread, but it doesn't
		 * return to ret_from_fork().
		 *
		 * In order to indicate that to tools like gdb,
		 * we reset the stack and instruction pointers.
		 *
		 * It does the same kernel frame setup to return to a kernel
		 * function that a kernel thread does.
		 */
		childregs->sp = 0;
		childregs->ip = 0;
		kthread_frame_init(frame, sp, arg);
		return 0;
	}

	/* Set a new TLS for the child thread? */
	if (clone_flags & CLONE_SETTLS)
		ret = set_new_tls(p, tls);

	if (!ret && unlikely(test_tsk_thread_flag(current, TIF_IO_BITMAP)))
		io_bitmap_share(p);

	return ret;
}
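
/*
 * Note: io_bitmap_share() does not copy the parent's I/O bitmap; it takes
 * a reference on it and sets TIF_IO_BITMAP on the child. A private copy
 * is only made if one of the sharers later changes its port permissions
 * (copy-on-write, see ioperm()).
 */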

void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));

	fpu__clear_all(&tsk->thread.fpu);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		cr4_set_bits(X86_CR4_TSD);
	preempt_enable();
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		cr4_clear_bits(X86_CR4_TSD);
	preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}

DEFINE_PER_CPU(u64, msr_misc_features_shadow);

static void set_cpuid_faulting(bool on)
{
	u64 msrval;

	msrval = this_cpu_read(msr_misc_features_shadow);
	msrval &= ~MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
	msrval |= (on << MSR_MISC_FEATURES_ENABLES_CPUID_FAULT_BIT);
	this_cpu_write(msr_misc_features_shadow, msrval);
	wrmsrl(MSR_MISC_FEATURES_ENABLES, msrval);
}
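
/*
 * Note: msr_misc_features_shadow caches the last value written to
 * MSR_MISC_FEATURES_ENABLES on this CPU so that the context-switch path
 * can read-modify-write the CPUID faulting bit without a costly RDMSR.
 */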

static void disable_cpuid(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOCPUID)) {
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context.
		 */
		set_cpuid_faulting(true);
	}
	preempt_enable();
}

static void enable_cpuid(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOCPUID)) {
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context.
		 */
		set_cpuid_faulting(false);
	}
	preempt_enable();
}

static int get_cpuid_mode(void)
{
	return !test_thread_flag(TIF_NOCPUID);
}

static int set_cpuid_mode(struct task_struct *task, unsigned long cpuid_enabled)
{
	if (!boot_cpu_has(X86_FEATURE_CPUID_FAULT))
		return -ENODEV;

	if (cpuid_enabled)
		enable_cpuid();
	else
		disable_cpuid();

	return 0;
}

/*
 * Called immediately after a successful exec.
 */
void arch_setup_new_exec(void)
{
	/* If cpuid was previously disabled for this task, re-enable it. */
	if (test_thread_flag(TIF_NOCPUID))
		enable_cpuid();

	/*
	 * Don't inherit TIF_SSBD across exec boundary when
	 * PR_SPEC_DISABLE_NOEXEC is used.
	 */
	if (test_thread_flag(TIF_SSBD) &&
	    task_spec_ssb_noexec(current)) {
		clear_thread_flag(TIF_SSBD);
		task_clear_spec_ssb_disable(current);
		task_clear_spec_ssb_noexec(current);
		speculation_ctrl_update(task_thread_info(current)->flags);
	}
}

#ifdef CONFIG_X86_IOPL_IOPERM
static inline void switch_to_bitmap(unsigned long tifp)
{
	/*
	 * Invalidate I/O bitmap if the previous task used it. This prevents
	 * any possible leakage of an active I/O bitmap.
	 *
	 * If the next task has an I/O bitmap it will handle it on exit to
	 * user mode.
	 */
	if (tifp & _TIF_IO_BITMAP)
		tss_invalidate_io_bitmap();
}

static void tss_copy_io_bitmap(struct tss_struct *tss, struct io_bitmap *iobm)
{
	/*
	 * Copy at least the byte range of the incoming task's bitmap which
	 * covers the permitted I/O ports.
	 *
	 * If the previous task which used an I/O bitmap had more bits
	 * permitted, then the copy needs to cover those as well so they
	 * get turned off.
	 */
	memcpy(tss->io_bitmap.bitmap, iobm->bitmap,
	       max(tss->io_bitmap.prev_max, iobm->max));

	/*
	 * Store the new max and the sequence number of this bitmap.
	 */
	tss->io_bitmap.prev_max = iobm->max;
	tss->io_bitmap.prev_sequence = iobm->sequence;
}

/**
 * native_tss_update_io_bitmap - Update I/O bitmap before exiting to usermode
 */
void native_tss_update_io_bitmap(void)
{
	struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
	struct thread_struct *t = &current->thread;
	u16 *base = &tss->x86_tss.io_bitmap_base;

	if (!test_thread_flag(TIF_IO_BITMAP)) {
		native_tss_invalidate_io_bitmap();
		return;
	}

	if (IS_ENABLED(CONFIG_X86_IOPL_IOPERM) && t->iopl_emul == 3) {
		*base = IO_BITMAP_OFFSET_VALID_ALL;
	} else {
		struct io_bitmap *iobm = t->io_bitmap;

		/*
		 * Only copy bitmap data when the sequence number differs. The
		 * update time is accounted to the incoming task.
		 */
		if (tss->io_bitmap.prev_sequence != iobm->sequence)
			tss_copy_io_bitmap(tss, iobm);

		/* Enable the bitmap */
		*base = IO_BITMAP_OFFSET_VALID_MAP;
	}

	/*
	 * Make sure that the TSS limit is covering the IO bitmap. It might
	 * have been cut down by a VMEXIT to 0x67 which would cause a
	 * subsequent I/O access from user space to trigger a #GP because
	 * the bitmap is outside the TSS limit.
	 */
	refresh_tss_limit();
}
#else /* CONFIG_X86_IOPL_IOPERM */
static inline void switch_to_bitmap(unsigned long tifp) { }
#endif

#ifdef CONFIG_SMP

struct ssb_state {
	struct ssb_state	*shared_state;
	raw_spinlock_t		lock;
	unsigned int		disable_state;
	unsigned long		local_state;
};

#define LSTATE_SSB	0

static DEFINE_PER_CPU(struct ssb_state, ssb_state);

void speculative_store_bypass_ht_init(void)
{
	struct ssb_state *st = this_cpu_ptr(&ssb_state);
	unsigned int this_cpu = smp_processor_id();
	unsigned int cpu;

	st->local_state = 0;

	/*
	 * Shared state setup happens once on the first bringup
	 * of the CPU. It's not destroyed on CPU hotunplug.
	 */
	if (st->shared_state)
		return;

	raw_spin_lock_init(&st->lock);

	/*
	 * Go over HT siblings and check whether one of them has set up the
	 * shared state pointer already.
	 */
	for_each_cpu(cpu, topology_sibling_cpumask(this_cpu)) {
		if (cpu == this_cpu)
			continue;

		if (!per_cpu(ssb_state, cpu).shared_state)
			continue;

		/* Link it to the state of the sibling: */
		st->shared_state = per_cpu(ssb_state, cpu).shared_state;
		return;
	}

	/*
	 * First HT sibling to come up on the core. Link shared state of
	 * the first HT sibling to itself. The siblings on the same core
	 * which come up later will see the shared state pointer and link
	 * themselves to the state of this CPU.
	 */
	st->shared_state = st;
}

/*
 * Logic is: First HT sibling enables SSBD for both siblings in the core
 * and last sibling to disable it, disables it for the whole core. This is
 * how MSR_SPEC_CTRL works in "hardware":
 *
 *  CORE_SPEC_CTRL = THREAD0_SPEC_CTRL | THREAD1_SPEC_CTRL
 */
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
	struct ssb_state *st = this_cpu_ptr(&ssb_state);
	u64 msr = x86_amd_ls_cfg_base;

	if (!static_cpu_has(X86_FEATURE_ZEN)) {
		msr |= ssbd_tif_to_amd_ls_cfg(tifn);
		wrmsrl(MSR_AMD64_LS_CFG, msr);
		return;
	}

	if (tifn & _TIF_SSBD) {
		/*
		 * Since this can race with prctl(), block reentry on the
		 * same CPU.
		 */
		if (__test_and_set_bit(LSTATE_SSB, &st->local_state))
			return;

		msr |= x86_amd_ls_cfg_ssbd_mask;

		raw_spin_lock(&st->shared_state->lock);
		/* First sibling enables SSBD: */
		if (!st->shared_state->disable_state)
			wrmsrl(MSR_AMD64_LS_CFG, msr);
		st->shared_state->disable_state++;
		raw_spin_unlock(&st->shared_state->lock);
	} else {
		if (!__test_and_clear_bit(LSTATE_SSB, &st->local_state))
			return;

		raw_spin_lock(&st->shared_state->lock);
		st->shared_state->disable_state--;
		if (!st->shared_state->disable_state)
			wrmsrl(MSR_AMD64_LS_CFG, msr);
		raw_spin_unlock(&st->shared_state->lock);
	}
}
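
/*
 * Note: disable_state is effectively a refcount of the siblings that
 * currently want SSBD. The MSR is only written on the 0 -> 1 transition
 * (first enabler) and the 1 -> 0 transition (last disabler), matching the
 * OR semantics described above.
 */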
#else
static __always_inline void amd_set_core_ssb_state(unsigned long tifn)
{
	u64 msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);

	wrmsrl(MSR_AMD64_LS_CFG, msr);
}
#endif

static __always_inline void amd_set_ssb_virt_state(unsigned long tifn)
{
	/*
	 * SSBD has the same definition in SPEC_CTRL and VIRT_SPEC_CTRL,
	 * so ssbd_tif_to_spec_ctrl() just works.
	 */
	wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn));
}

/*
 * Update the MSRs managing speculation control, during context switch.
 *
 * tifp: Previous task's thread flags
 * tifn: Next task's thread flags
 */
static __always_inline void __speculation_ctrl_update(unsigned long tifp,
						      unsigned long tifn)
{
	unsigned long tif_diff = tifp ^ tifn;
	u64 msr = x86_spec_ctrl_base;
	bool updmsr = false;

	lockdep_assert_irqs_disabled();

	/* Handle change of TIF_SSBD depending on the mitigation method. */
	if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
		if (tif_diff & _TIF_SSBD)
			amd_set_ssb_virt_state(tifn);
	} else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
		if (tif_diff & _TIF_SSBD)
			amd_set_core_ssb_state(tifn);
	} else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
		   static_cpu_has(X86_FEATURE_AMD_SSBD)) {
		updmsr |= !!(tif_diff & _TIF_SSBD);
		msr |= ssbd_tif_to_spec_ctrl(tifn);
	}

	/* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled. */
	if (IS_ENABLED(CONFIG_SMP) &&
	    static_branch_unlikely(&switch_to_cond_stibp)) {
		updmsr |= !!(tif_diff & _TIF_SPEC_IB);
		msr |= stibp_tif_to_spec_ctrl(tifn);
	}

	if (updmsr)
		wrmsrl(MSR_IA32_SPEC_CTRL, msr);
}
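
/*
 * Note: updmsr batches the SSBD and STIBP changes so that at most one
 * MSR_IA32_SPEC_CTRL write is issued per invocation, even when both bits
 * change at once.
 */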

static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk)
{
	if (test_and_clear_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE)) {
		if (task_spec_ssb_disable(tsk))
			set_tsk_thread_flag(tsk, TIF_SSBD);
		else
			clear_tsk_thread_flag(tsk, TIF_SSBD);

		if (task_spec_ib_disable(tsk))
			set_tsk_thread_flag(tsk, TIF_SPEC_IB);
		else
			clear_tsk_thread_flag(tsk, TIF_SPEC_IB);
	}
	/* Return the updated threadinfo flags. */
	return task_thread_info(tsk)->flags;
}

void speculation_ctrl_update(unsigned long tif)
{
	unsigned long flags;

	/* Forced update. Make sure all relevant TIF flags are different */
	local_irq_save(flags);
	__speculation_ctrl_update(~tif, tif);
	local_irq_restore(flags);
}
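
/*
 * Note: passing ~tif as the "previous" flags makes every bit of tif_diff
 * set in __speculation_ctrl_update(), so all mitigations relevant to this
 * task are unconditionally reapplied instead of delta-updated.
 */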

/* Called from seccomp/prctl update */
void speculation_ctrl_update_current(void)
{
	preempt_disable();
	speculation_ctrl_update(speculation_ctrl_update_tif(current));
	preempt_enable();
}

static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
{
	unsigned long newval, cr4 = this_cpu_read(cpu_tlbstate.cr4);

	newval = cr4 ^ mask;
	if (newval != cr4) {
		this_cpu_write(cpu_tlbstate.cr4, newval);
		__write_cr4(newval);
	}
}

void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p)
{
	unsigned long tifp, tifn;

	tifn = READ_ONCE(task_thread_info(next_p)->flags);
	tifp = READ_ONCE(task_thread_info(prev_p)->flags);

	switch_to_bitmap(tifp);

	propagate_user_return_notify(prev_p, next_p);

	if ((tifp & _TIF_BLOCKSTEP || tifn & _TIF_BLOCKSTEP) &&
	    arch_has_block_step()) {
		unsigned long debugctl, msk;

		rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
		debugctl &= ~DEBUGCTLMSR_BTF;
		msk = tifn & _TIF_BLOCKSTEP;
		debugctl |= (msk >> TIF_BLOCKSTEP) << DEBUGCTLMSR_BTF_SHIFT;
		wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
	}
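
	/*
	 * Note: (msk >> TIF_BLOCKSTEP) extracts _TIF_BLOCKSTEP as a 0/1
	 * value, and the shift by DEBUGCTLMSR_BTF_SHIFT moves it into the
	 * BTF (branch trap flag) position of DEBUGCTL.
	 */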

	if ((tifp ^ tifn) & _TIF_NOTSC)
		cr4_toggle_bits_irqsoff(X86_CR4_TSD);

	if ((tifp ^ tifn) & _TIF_NOCPUID)
		set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));

	if (likely(!((tifp | tifn) & _TIF_SPEC_FORCE_UPDATE))) {
		__speculation_ctrl_update(tifp, tifn);
	} else {
		speculation_ctrl_update_tif(prev_p);
		tifn = speculation_ctrl_update_tif(next_p);

		/* Enforce MSR update to ensure consistent state */
		__speculation_ctrl_update(~tifn, tifn);
	}

	if ((tifp ^ tifn) & _TIF_SLD)
		switch_to_sld(tifn);
}

/*
 * Idle related variables and functions
 */
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);

static void (*x86_idle)(void);

#ifndef CONFIG_SMP
static inline void play_dead(void)
{
	BUG();
}
#endif

void arch_cpu_idle_enter(void)
{
	tsc_verify_tsc_adjust(false);
	local_touch_nmi();
}

void arch_cpu_idle_dead(void)
{
	play_dead();
}

/*
 * Called from the generic idle code.
 */
void arch_cpu_idle(void)
{
	x86_idle();
}

/*
 * We use this if we don't have any better idle routine..
 */
void __cpuidle default_idle(void)
{
	raw_safe_halt();
}
#if defined(CONFIG_APM_MODULE) || defined(CONFIG_HALTPOLL_CPUIDLE_MODULE)
EXPORT_SYMBOL(default_idle);
#endif

#ifdef CONFIG_XEN
bool xen_set_default_idle(void)
{
	bool ret = !!x86_idle;

	x86_idle = default_idle;

	return ret;
}
#endif

void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	set_cpu_online(smp_processor_id(), false);
	disable_local_APIC();
	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));

	/*
	 * Use wbinvd on processors that support SME. This provides support
	 * for performing a successful kexec when going from SME inactive
	 * to SME active (or vice-versa). The cache must be cleared so that
	 * if there are entries with the same physical address, both with and
	 * without the encryption bit, they don't race each other when flushed
	 * and potentially end up with the wrong entry being committed to
	 * memory.
	 */
	if (boot_cpu_has(X86_FEATURE_SME))
		native_wbinvd();
	for (;;) {
		/*
		 * Use native_halt() so that memory contents don't change
		 * (stack usage and variables) after possibly issuing the
		 * native_wbinvd() above.
		 */
		native_halt();
	}
}

/*
 * AMD Erratum 400 aware idle routine. We handle it the same way as C3 power
 * states (local apic timer and TSC stop).
 *
 * XXX this function is completely buggered vs RCU and tracing.
 */
static void amd_e400_idle(void)
{
	/*
	 * We cannot use static_cpu_has_bug() here because X86_BUG_AMD_APIC_C1E
	 * gets set after static_cpu_has() places have been converted via
	 * alternatives.
	 */
	if (!boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
		default_idle();
		return;
	}

	tick_broadcast_enter();

	default_idle();

	/*
	 * The switch back from broadcast mode needs to be called with
	 * interrupts disabled.
	 */
	raw_local_irq_disable();
	tick_broadcast_exit();
	raw_local_irq_enable();
}

/*
 * Intel Core2 and older machines prefer MWAIT over HALT for C1.
 * We can't rely on cpuidle installing MWAIT, because it will not load
 * on systems that support only C1 -- so the boot default must be MWAIT.
 *
 * Some AMD machines are the opposite, they depend on using HALT.
 *
 * So for default C1, which is used during boot until cpuidle loads,
 * use MWAIT-C1 on Intel HW that has it, else use HALT.
 */
static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
{
	if (c->x86_vendor != X86_VENDOR_INTEL)
		return 0;

	if (!cpu_has(c, X86_FEATURE_MWAIT) || boot_cpu_has_bug(X86_BUG_MONITOR))
		return 0;

	return 1;
}

/*
 * MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
 * with interrupts enabled and no flags, which is backwards compatible with the
 * original MWAIT implementation.
 */
static __cpuidle void mwait_idle(void)
{
	if (!current_set_polling_and_test()) {
		if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
			mb(); /* quirk */
			clflush((void *)&current_thread_info()->flags);
			mb(); /* quirk */
		}

		__monitor((void *)&current_thread_info()->flags, 0, 0);
		if (!need_resched())
			__sti_mwait(0, 0);
		else
			raw_local_irq_enable();
	} else {
		raw_local_irq_enable();
	}
	__current_clr_polling();
}
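
/*
 * Note: the monitor is armed on the idle task's thread_info flags word, so
 * a remote wakeup that sets TIF_NEED_RESCHED writes the monitored cacheline
 * and breaks out of MWAIT without an IPI; that is what the polling protocol
 * around current_set_polling_and_test() arranges.
 */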

void select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
		pr_warn_once("WARNING: polling idle and HT enabled, performance may degrade\n");
#endif
	if (x86_idle || boot_option_idle_override == IDLE_POLL)
		return;

	if (boot_cpu_has_bug(X86_BUG_AMD_E400)) {
		pr_info("using AMD E400 aware idle routine\n");
		x86_idle = amd_e400_idle;
	} else if (prefer_mwait_c1_over_halt(c)) {
		pr_info("using mwait in idle threads\n");
		x86_idle = mwait_idle;
	} else
		x86_idle = default_idle;
}

void amd_e400_c1e_apic_setup(void)
{
	if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E)) {
		pr_info("Switch to broadcast mode on CPU%d\n", smp_processor_id());
		local_irq_disable();
		tick_broadcast_force();
		local_irq_enable();
	}
}

void __init arch_post_acpi_subsys_init(void)
{
	u32 lo, hi;

	if (!boot_cpu_has_bug(X86_BUG_AMD_E400))
		return;

	/*
	 * AMD E400 detection needs to happen after ACPI has been enabled. If
	 * the machine is affected K8_INTP_C1E_ACTIVE_MASK bits are set in
	 * MSR_K8_INT_PENDING_MSG.
	 */
	rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
	if (!(lo & K8_INTP_C1E_ACTIVE_MASK))
		return;

	boot_cpu_set_bug(X86_BUG_AMD_APIC_C1E);

	if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
		mark_tsc_unstable("TSC halt in AMD C1E");
	pr_info("System has AMD C1E enabled\n");
}

static int __init idle_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "poll")) {
		pr_info("using polling idle threads\n");
		boot_option_idle_override = IDLE_POLL;
		cpu_idle_poll_ctrl(true);
	} else if (!strcmp(str, "halt")) {
		/*
		 * With idle=halt, HALT is forced for CPU idle and the
		 * deeper C2/C3 states won't be used again. The CPU idle
		 * driver can still be loaded.
		 */
		x86_idle = default_idle;
		boot_option_idle_override = IDLE_HALT;
	} else if (!strcmp(str, "nomwait")) {
		/*
		 * With idle=nomwait, MWAIT is disabled for the CPU C2/C3
		 * states.
		 */
		boot_option_idle_override = IDLE_NOMWAIT;
	} else
		return -1;

	return 0;
}
early_param("idle", idle_setup);

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
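
/*
 * Note: this randomizes the starting stack downwards by up to 8 KiB and
 * then re-aligns to 16 bytes, giving 512 possible offsets.
 */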

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	return randomize_page(mm->brk, 0x02000000);
}

/*
 * Called from fs/proc with a reference on @p to find the function
 * which called into schedule(). This needs to be done carefully
 * because the task might wake up and we might look at a stack
 * changing under us.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long start, bottom, top, sp, fp, ip, ret = 0;
	int count = 0;

	if (p == current || p->state == TASK_RUNNING)
		return 0;

	if (!try_get_task_stack(p))
		return 0;

	start = (unsigned long)task_stack_page(p);
	if (!start)
		goto out;

	/*
	 * Layout of the stack page:
	 *
	 * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
	 * PADDING
	 * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
	 * stack
	 * ----------- bottom = start
	 *
	 * The task's stack pointer points at the location where the
	 * framepointer is stored. The data on the stack is:
	 * ... IP FP ... IP FP
	 *
	 * We need to read FP and IP, so we need to adjust the upper
	 * bound by another unsigned long.
	 */
	top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
	top -= 2 * sizeof(unsigned long);
	bottom = start;

	sp = READ_ONCE(p->thread.sp);
	if (sp < bottom || sp > top)
		goto out;

	fp = READ_ONCE_NOCHECK(((struct inactive_task_frame *)sp)->bp);
	do {
		if (fp < bottom || fp > top)
			goto out;
		ip = READ_ONCE_NOCHECK(*(unsigned long *)(fp + sizeof(unsigned long)));
		if (!in_sched_functions(ip)) {
			ret = ip;
			goto out;
		}
		fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
	} while (count++ < 16 && p->state != TASK_RUNNING);

out:
	put_task_stack(p);
	return ret;
}

long do_arch_prctl_common(struct task_struct *task, int option,
			  unsigned long cpuid_enabled)
{
	switch (option) {
	case ARCH_GET_CPUID:
		return get_cpuid_mode();
	case ARCH_SET_CPUID:
		return set_cpuid_mode(task, cpuid_enabled);
	}

	return -EINVAL;
}