/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *  X86-64 port
 *	Andi Kleen.
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
#include <asm/vdso.h>
#include <asm/intel_rdt.h>
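
/*
 * Per-CPU scratch slot; the 64-bit SYSCALL entry code stashes the user
 * stack pointer here while it switches to the kernel stack.
 */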
__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);

/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	printk(KERN_DEFAULT "RIP: %04lx:%pS\n", regs->cs & 0xffff,
		(void *)regs->ip);
	printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx", regs->ss,
		regs->sp, regs->flags);
	if (regs->orig_ax != -1)
		pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax);
	else
		pr_cont("\n");

	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = __read_cr4();

	printk(KERN_DEFAULT "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_DEFAULT "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
			es, cr0);
	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
			cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);

	/* Only print out debug registers if they are in their non-default state. */
	if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
	    (d6 == DR6_RESERVED) && (d7 == 0x400))) {
		printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n",
		       d0, d1, d2);
		printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n",
		       d3, d6, d7);
	}

	if (boot_cpu_has(X86_FEATURE_OSPKE))
		printk(KERN_DEFAULT "PKRU: %08x\n", read_pkru());
}

void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
		if (dead_task->mm->context.ldt) {
			pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
				dead_task->comm,
				dead_task->mm->context.ldt->entries,
				dead_task->mm->context.ldt->size);
			BUG();
		}
#endif
	}
}

int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p, unsigned long tls)
{
	int err;
	struct pt_regs *childregs;
	struct fork_frame *fork_frame;
	struct inactive_task_frame *frame;
	struct task_struct *me = current;

	p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
	childregs = task_pt_regs(p);
	fork_frame = container_of(childregs, struct fork_frame, regs);
	frame = &fork_frame->frame;
	frame->bp = 0;
	frame->ret_addr = (unsigned long) ret_from_fork;
	p->thread.sp = (unsigned long) fork_frame;
	p->thread.io_bitmap_ptr = NULL;
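
	/*
	 * Inherit the parent's segment state.  When a selector is nonzero,
	 * the live base comes from the descriptor tables, so the saved MSR
	 * base would be stale; copy the base only for a zero selector.
	 */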
	savesegment(gs, p->thread.gsindex);
	p->thread.gsbase = p->thread.gsindex ? 0 : me->thread.gsbase;
	savesegment(fs, p->thread.fsindex);
	p->thread.fsbase = p->thread.fsindex ? 0 : me->thread.fsbase;
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);
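
	/* Hardware breakpoints are not inherited by the child. */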
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		frame->bx = sp;		/* function */
		frame->r12 = arg;
		return 0;
	}
	frame->bx = 0;
	*childregs = *current_pt_regs();

	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;

	err = -ENOMEM;
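	/* Duplicate the parent's ioperm() bitmap, if it has one. */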
	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
						  IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (in_ia32_syscall())
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)tls, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, tls);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}

	return err;
}

static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);
	regs->ip	= new_ip;
	regs->sp	= new_sp;
	regs->cs	= _cs;
	regs->ss	= _ss;
	regs->flags	= X86_EFLAGS_IF;
	force_iret();
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}

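/*
 * Used for both ia32 and x32 processes: x32 tasks run in long mode, so
 * they get the 64-bit code segment, while ia32 tasks get __USER32_CS.
 */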
#ifdef CONFIG_COMPAT
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    test_thread_flag(TIF_X32)
			    ? __USER_CS : __USER32_CS,
			    __USER_DS, __USER_DS);
}
#endif

/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes are not supported here.  Set the probe on schedule instead.
 * The function graph tracer is not supported either.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	struct fpu *prev_fpu = &prev->fpu;
	struct fpu *next_fpu = &next->fpu;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
	unsigned prev_fsindex, prev_gsindex;
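
	/* Save the outgoing task's FPU state before any other switching. */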
	switch_fpu_prepare(prev_fpu, cpu);

	/*
	 * We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	savesegment(fs, prev_fsindex);
	savesegment(gs, prev_gsindex);

	/*
	 * Load TLS before restoring any segments so that segment loads
	 * reference the correct GDT entries.
	 */
	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.  This
	 * must be done after loading TLS entries in the GDT but before
	 * loading segments that might reference them, and it must be
	 * done before fpu__restore(), so the TS bit is up to date.
	 */
	arch_end_context_switch(next_p);

	/*
	 * Switch DS and ES.
	 *
	 * Reading them only returns the selectors, but writing them (if
	 * nonzero) loads the full descriptor from the GDT or LDT.  The
	 * LDT for next is loaded in switch_mm, and the GDT is loaded
	 * above.
	 *
	 * We therefore need to write new values to the segment
	 * registers on every context switch unless both the new and old
	 * values are zero.
	 *
	 * Note that we don't need to do anything for CS and SS, as
	 * those are saved and restored as part of pt_regs.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	/*
	 * Switch FS and GS.
	 *
	 * These are even more complicated than DS and ES: they have
	 * 64-bit bases that are controlled by arch_prctl.  The bases
	 * don't necessarily match the selectors, as user code can do
	 * any number of things to cause them to be inconsistent.
	 *
	 * We don't promise to preserve the bases if the selectors are
	 * nonzero.  We also don't promise to preserve the base if the
	 * selector is zero and the base doesn't match whatever was
	 * most recently passed to ARCH_SET_FS/GS.  (If/when the
	 * FSGSBASE instructions are enabled, we'll need to offer
	 * stronger guarantees.)
	 *
	 * As an invariant,
	 * (fsbase != 0 && fsindex != 0) || (gsbase != 0 && gsindex != 0) is
	 * impossible.
	 */
	if (next->fsindex) {
		/* Loading a nonzero value into FS sets the index and base. */
		loadsegment(fs, next->fsindex);
	} else {
		if (next->fsbase) {
			/* Next index is zero but next base is nonzero. */
			if (prev_fsindex)
				loadsegment(fs, 0);
			wrmsrl(MSR_FS_BASE, next->fsbase);
		} else {
			/* Next base and index are both zero. */
			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
				/*
				 * We don't know the previous base and can't
				 * find out without RDMSR.  Forcibly clear it.
				 */
				loadsegment(fs, __USER_DS);
				loadsegment(fs, 0);
			} else {
				/*
				 * If the previous index is zero and ARCH_SET_FS
				 * didn't change the base, then the base is
				 * also zero and we don't need to do anything.
				 */
				if (prev->fsbase || prev_fsindex)
					loadsegment(fs, 0);
			}
		}
	}
	/*
	 * Save the old state and preserve the invariant.
	 * NB: if prev_fsindex == 0, then we can't reliably learn the base
	 * without RDMSR because Intel user code can zero it without telling
	 * us and AMD user code can program any 32-bit value without telling
	 * us.
	 */
	if (prev_fsindex)
		prev->fsbase = 0;
	prev->fsindex = prev_fsindex;

	if (next->gsindex) {
		/* Loading a nonzero value into GS sets the index and base. */
		load_gs_index(next->gsindex);
	} else {
		if (next->gsbase) {
			/* Next index is zero but next base is nonzero. */
			if (prev_gsindex)
				load_gs_index(0);
			wrmsrl(MSR_KERNEL_GS_BASE, next->gsbase);
		} else {
			/* Next base and index are both zero. */
			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
				/*
				 * We don't know the previous base and can't
				 * find out without RDMSR.  Forcibly clear it.
				 *
				 * This contains a pointless SWAPGS pair.
				 * Fixing it would involve an explicit check
				 * for Xen or a new pvop.
				 */
				load_gs_index(__USER_DS);
				load_gs_index(0);
			} else {
				/*
				 * If the previous index is zero and ARCH_SET_GS
				 * didn't change the base, then the base is
				 * also zero and we don't need to do anything.
				 */
				if (prev->gsbase || prev_gsindex)
					load_gs_index(0);
			}
		}
	}
	/*
	 * Save the old state and preserve the invariant.
	 * NB: if prev_gsindex == 0, then we can't reliably learn the base
	 * without RDMSR because Intel user code can zero it without telling
	 * us and AMD user code can program any 32-bit value without telling
	 * us.
	 */
	if (prev_gsindex)
		prev->gsbase = 0;
	prev->gsindex = prev_gsindex;
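
	/* Set up the incoming task's FPU state. */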
	switch_fpu_finish(next_fpu, cpu);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	this_cpu_write(current_task, next_p);

	/* Reload esp0 and ss1.  This changes current_thread_info(). */
	load_sp0(tss, next);

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps.
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);

#ifdef CONFIG_XEN
	/*
	 * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
	 * current_pt_regs()->flags may not match the current task's
	 * intended IOPL.  We need to switch it manually.
	 */
	if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
		     prev->iopl != next->iopl))
		xen_set_iopl_mask(next->iopl);
#endif

	if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
		/*
		 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
		 * does not update the cached descriptor.  As a result, if we
		 * do SYSRET while SS is NULL, we'll end up in user mode with
		 * SS apparently equal to __USER_DS but actually unusable.
		 *
		 * The straightforward workaround would be to fix it up just
		 * before SYSRET, but that would slow down the system call
		 * fast paths.  Instead, we ensure that SS is never NULL in
		 * system call context.  We do this by replacing NULL SS
		 * selectors at every context switch.  SYSCALL sets up a valid
		 * SS, so the only way to get NULL is to re-enter the kernel
		 * from CPL 3 through an interrupt.  Since that can't happen
		 * in the same task as a running syscall, we are guaranteed to
		 * context switch between every interrupt vector entry and a
		 * subsequent SYSRET.
		 *
		 * We read SS first because SS reads are much faster than
		 * writes.  Out of caution, we force SS to __KERNEL_DS even if
		 * it previously had a different non-NULL value.
		 */
		unsigned short ss_sel;
		savesegment(ss, ss_sel);
		if (ss_sel != __KERNEL_DS)
			loadsegment(ss, __KERNEL_DS);
	}

	/* Load the Intel cache allocation PQR MSR. */
	intel_rdt_sched_in();

	return prev_p;
}

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_ADDR32);
	clear_thread_flag(TIF_X32);

	/* Ensure the corresponding mm is not marked. */
	if (current->mm)
		current->mm->context.ia32_compat = 0;

	/*
	 * TBD: overwrites user setup.  Should have two bits.
	 * But 64-bit processes have always behaved this way,
	 * so it's not too bad.  The main problem is just that
	 * 32-bit children are affected again.
	 */
	current->personality &= ~READ_IMPLIES_EXEC;
}

void set_personality_ia32(bool x32)
{
	/* inherit personality from parent */

	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_ADDR32);

	/* Mark the associated mm as containing 32-bit tasks. */
	if (x32) {
		clear_thread_flag(TIF_IA32);
		set_thread_flag(TIF_X32);
		if (current->mm)
			current->mm->context.ia32_compat = TIF_X32;
		current->personality &= ~READ_IMPLIES_EXEC;
		/*
		 * in_compat_syscall() uses the presence of the x32
		 * syscall bit flag to determine compat status.
		 */
		current->thread.status &= ~TS_COMPAT;
	} else {
		set_thread_flag(TIF_IA32);
		clear_thread_flag(TIF_X32);
		if (current->mm)
			current->mm->context.ia32_compat = TIF_IA32;
		current->personality |= force_personality32;
		/* Prepare the first "return" to user space */
		current->thread.status |= TS_COMPAT;
	}
}
EXPORT_SYMBOL_GPL(set_personality_ia32);
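
/*
 * Map a vDSO image at a caller-chosen address for checkpoint/restore;
 * returns the image size on success so the caller knows the mapped extent.
 */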
#ifdef CONFIG_CHECKPOINT_RESTORE
static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
{
	int ret;

	ret = map_vdso_once(image, addr);
	if (ret)
		return ret;

	return (long)image->size;
}
#endif

long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_MAX)
			return -EPERM;
		cpu = get_cpu();
		task->thread.gsindex = 0;
		task->thread.gsbase = addr;
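		/*
		 * While in the kernel, the user's GS base lives in
		 * MSR_KERNEL_GS_BASE (SWAPGS exchanges it with the kernel
		 * base on entry/exit), hence the MSR written below.
		 */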
		if (doit) {
			load_gs_index(0);
			ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, addr);
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/*
		 * Not strictly needed for fs, but do it for symmetry
		 * with gs.
		 */
		if (addr >= TASK_SIZE_MAX)
			return -EPERM;
		cpu = get_cpu();
		task->thread.fsindex = 0;
		task->thread.fsbase = addr;
		if (doit) {
			/* set the selector to 0 to not confuse __switch_to */
			loadsegment(fs, 0);
			ret = wrmsrl_safe(MSR_FS_BASE, addr);
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;

		if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fsbase;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;

		if (doit)
			rdmsrl(MSR_KERNEL_GS_BASE, base);
		else
			base = task->thread.gsbase;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

#ifdef CONFIG_CHECKPOINT_RESTORE
# ifdef CONFIG_X86_X32_ABI
	case ARCH_MAP_VDSO_X32:
		return prctl_map_vdso(&vdso_image_x32, addr);
# endif
# if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case ARCH_MAP_VDSO_32:
		return prctl_map_vdso(&vdso_image_32, addr);
# endif
	case ARCH_MAP_VDSO_64:
		return prctl_map_vdso(&vdso_image_64, addr);
#endif

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}
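
/* Report the task's saved user stack pointer (e.g. for /proc/<pid>/stat). */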
unsigned long KSTK_ESP(struct task_struct *task)
{
	return task_pt_regs(task)->sp;
}