/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */
#ifndef _FPU_INTERNAL_H
#define _FPU_INTERNAL_H

#include <linux/kernel_stat.h>
#include <linux/regset.h>
#include <linux/slab.h>

#include <asm/asm.h>		/* _ASM_EXTABLE, used below */
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/uaccess.h>
#include <asm/xsave.h>
extern unsigned int sig_xstate_size;
extern void fpu_init(void);

DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);
extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
				xstateregs_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
				xstateregs_set;
/*
 * xstateregs_active == fpregs_active. Please refer to the comment
 * at the definition of fpregs_active.
 */
#define xstateregs_active	fpregs_active
extern struct _fpx_sw_bytes fx_sw_reserved;
#ifdef CONFIG_IA32_EMULATION
extern unsigned int sig_xstate_ia32_size;
extern struct _fpx_sw_bytes fx_sw_reserved_ia32;
struct _fpstate_ia32;
struct _xstate_ia32;
extern int save_i387_xstate_ia32(void __user *buf);
extern int restore_i387_xstate_ia32(void __user *buf);
#endif
#ifdef CONFIG_MATH_EMULATION
extern void finit_soft_fpu(struct i387_soft_struct *soft);
#else
static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
#endif
#define X87_FSW_ES (1 << 7)	/* Exception Summary */
static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has(X86_FEATURE_FXSR);
}

extern void __sanitize_i387_state(struct task_struct *);

/*
 * With xsaveopt, state components in their init state may not have
 * been written back to the save area; sanitize the image before
 * anyone inspects it.
 */
static inline void sanitize_i387_state(struct task_struct *tsk)
{
	if (!use_xsaveopt())
		return;
	__sanitize_i387_state(tsk);
}
#ifdef CONFIG_X86_64
static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
	int err;

	/* See comment in fpu_fxsave() below. */
#ifdef CONFIG_AS_FXSAVEQ
	asm volatile("1:  fxrstorq %[fx]\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:  movl $-1,%[err]\n"
		     "    jmp  2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err)
		     : [fx] "m" (*fx), "0" (0));
#else
	asm volatile("1:  rex64/fxrstor (%[fx])\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:  movl $-1,%[err]\n"
		     "    jmp  2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err)
		     : [fx] "R" (fx), "m" (*fx), "0" (0));
#endif
	return err;
}

static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
	int err;

	/*
	 * Clear the bytes not touched by the fxsave and reserved
	 * for the SW usage.
	 */
	err = __clear_user(&fx->sw_reserved,
			   sizeof(struct _fpx_sw_bytes));
	if (unlikely(err))
		return -EFAULT;

	/* See comment in fpu_fxsave() below. */
#ifdef CONFIG_AS_FXSAVEQ
	asm volatile("1:  fxsaveq %[fx]\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:  movl $-1,%[err]\n"
		     "    jmp  2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err), [fx] "=m" (*fx)
		     : "0" (0));
#else
	asm volatile("1:  rex64/fxsave (%[fx])\n\t"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:  movl $-1,%[err]\n"
		     "    jmp  2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : [err] "=r" (err), "=m" (*fx)
		     : [fx] "R" (fx), "0" (0));
#endif
	if (unlikely(err) &&
	    __clear_user(fx, sizeof(struct i387_fxsave_struct)))
		err = -EFAULT;
	/* No need to clear here because the caller clears USED_MATH */
	return err;
}

static inline void fpu_fxsave(struct fpu *fpu)
{
	/* Using "rex64; fxsave %0" is broken because, if the memory operand
	   uses any extended registers for addressing, a second REX prefix
	   will be generated (to the assembler, rex64 followed by semicolon
	   is a separate instruction), and hence the 64-bitness is lost. */

#ifdef CONFIG_AS_FXSAVEQ
	/* Using "fxsaveq %0" would be the ideal choice, but is only supported
	   starting with gas 2.16. */
	__asm__ __volatile__("fxsaveq %0"
			     : "=m" (fpu->state->fxsave));
#else
	/* Using, as a workaround, the properly prefixed form below isn't
	   accepted by any binutils version so far released, complaining that
	   the same type of prefix is used twice if an extended register is
	   needed for addressing (fix submitted to mainline 2005-11-21).
		asm volatile("rex64/fxsave %0"
			     : "=m" (fpu->state->fxsave));
	   This, however, we can work around by forcing the compiler to select
	   an addressing mode that doesn't require extended registers. */
	asm volatile("rex64/fxsave (%[fx])"
		     : "=m" (fpu->state->fxsave)
		     : [fx] "R" (&fpu->state->fxsave));
#endif
}
#else  /* CONFIG_X86_32 */

/* perform fxrstor iff the processor has extended states, otherwise frstor */
static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
	/*
	 * The "nop" is needed to make the instructions the same
	 * length.
	 */
	alternative_input(
		"nop ; frstor %1",
		"fxrstor %1",
		X86_FEATURE_FXSR,
		"m" (*fx));

	return 0;
}

static inline void fpu_fxsave(struct fpu *fpu)
{
	asm volatile("fxsave %[fx]"
		     : [fx] "=m" (fpu->state->fxsave));
}

#endif	/* CONFIG_X86_64 */
/*
 * These must be called with preempt disabled. Returns
 * 'true' if the FPU state is still intact.
 */
static inline int fpu_save_init(struct fpu *fpu)
{
	if (use_xsave()) {
		fpu_xsave(fpu);

		/*
		 * xsave header may indicate the init state of the FP.
		 */
		if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
			return 1;
	} else if (use_fxsr()) {
		fpu_fxsave(fpu);
	} else {
		asm volatile("fnsave %[fx]; fwait"
			     : [fx] "=m" (fpu->state->fsave));
		return 0;
	}

	/*
	 * If exceptions are pending, we need to clear them so
	 * that we don't randomly get exceptions later.
	 *
	 * FIXME! Is this perhaps only true for the old-style
	 * irq13 case? Maybe we could leave the x87 state
	 * intact otherwise?
	 */
	if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
		asm volatile("fnclex");
		return 0;
	}
	return 1;
}

static inline int __save_init_fpu(struct task_struct *tsk)
{
	return fpu_save_init(&tsk->thread.fpu);
}
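
/*
 * Illustrative sketch (hypothetical caller, not part of this file's
 * API; assumes @tsk currently owns the FPU): how the return value is
 * meant to be read. Non-zero means the registers still match the
 * saved image, so a later restore could be skipped.
 */
static inline bool example_fpu_state_intact(struct task_struct *tsk)
{
	bool intact;

	preempt_disable();		/* fpu_save_init() requires this */
	intact = __save_init_fpu(tsk);
	preempt_enable();

	return intact;
}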
static inline int fpu_fxrstor_checking(struct fpu *fpu)
{
	return fxrstor_checking(&fpu->state->fxsave);
}

static inline int fpu_restore_checking(struct fpu *fpu)
{
	if (use_xsave())
		return fpu_xrstor_checking(fpu);
	else
		return fpu_fxrstor_checking(fpu);
}
static inline int restore_fpu_checking(struct task_struct *tsk)
{
	/* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
	   is pending. Clear the x87 state here by setting it to fixed
	   values. "m" is a random variable that should be in L1 */
	alternative_input(
		ASM_NOP8 ASM_NOP2,
		"emms\n\t"		/* clear stack tags */
		"fildl %P[addr]",	/* set F?P to defined value */
		X86_FEATURE_FXSAVE_LEAK,
		[addr] "m" (tsk->thread.fpu.has_fpu));

	return fpu_restore_checking(&tsk->thread.fpu);
}
/*
 * Software FPU state helpers. Careful: these need to
 * be preemption protected *and* they need to be
 * properly paired with the CR0.TS changes!
 */
static inline int __thread_has_fpu(struct task_struct *tsk)
{
	return tsk->thread.fpu.has_fpu;
}

/* Must be paired with an 'stts' after! */
static inline void __thread_clear_has_fpu(struct task_struct *tsk)
{
	tsk->thread.fpu.has_fpu = 0;
	percpu_write(fpu_owner_task, NULL);
}

/* Must be paired with a 'clts' before! */
static inline void __thread_set_has_fpu(struct task_struct *tsk)
{
	tsk->thread.fpu.has_fpu = 1;
	percpu_write(fpu_owner_task, tsk);
}
/*
 * Encapsulate the CR0.TS handling together with the
 * software flag.
 *
 * These generally need preemption protection to work,
 * so try to avoid using these on their own.
 */
static inline void __thread_fpu_end(struct task_struct *tsk)
{
	__thread_clear_has_fpu(tsk);
	stts();
}

static inline void __thread_fpu_begin(struct task_struct *tsk)
{
	clts();
	__thread_set_has_fpu(tsk);
}
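
/*
 * Illustrative sketch (hypothetical helper, not part of this file's
 * API): the preemption-disabled bracket and CR0.TS pairing that the
 * comment above requires from callers.
 */
static inline void example_borrow_fpu(struct task_struct *tsk)
{
	preempt_disable();
	__thread_fpu_begin(tsk);	/* clts() + ownership bookkeeping */
	/* ... touch the FPU registers here ... */
	__thread_fpu_end(tsk);		/* drop ownership + stts() */
	preempt_enable();
}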
/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state and
 *    sets the new state of the CR0.TS bit. This is
 *    done within the context of the old process.
 *
 *  - switch_fpu_finish() restores the new state as
 *    necessary.
 */
typedef struct { int preload; } fpu_switch_t;
/*
 * Lazy restore: the per-cpu "fpu_owner_task" variable and the task's
 * "last_cpu" field together tell us whether this CPU still holds the
 * task's FPU state untouched, in which case the restore on a context
 * switch can be skipped entirely.
 */
static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
{
	return new == percpu_read_stable(fpu_owner_task) &&
		cpu == new->thread.fpu.last_cpu;
}
static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
{
	fpu_switch_t fpu;

	fpu.preload = tsk_used_math(new) && new->fpu_counter > 5;
	if (__thread_has_fpu(old)) {
		if (!__save_init_fpu(old))
			cpu = ~0;
		old->thread.fpu.last_cpu = cpu;
		old->thread.fpu.has_fpu = 0;	/* But leave fpu_owner_task! */

		/* Don't change CR0.TS if we just switch! */
		if (fpu.preload) {
			new->fpu_counter++;
			__thread_set_has_fpu(new);
			prefetch(new->thread.fpu.state);
		} else
			stts();
	} else {
		old->fpu_counter = 0;
		old->thread.fpu.last_cpu = ~0;
		if (fpu.preload) {
			new->fpu_counter++;
			if (fpu_lazy_restore(new, cpu))
				fpu.preload = 0;
			else
				prefetch(new->thread.fpu.state);
			__thread_fpu_begin(new);
		}
	}
	return fpu;
}
/*
 * By the time this gets called, we've already cleared CR0.TS and
 * given the process the FPU if we are going to preload the FPU
 * state - all we need to do is to conditionally restore the register
 * state itself.
 */
static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
{
	if (fpu.preload) {
		if (unlikely(restore_fpu_checking(new)))
			__thread_fpu_end(new);
	}
}
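
/*
 * Illustrative sketch (hypothetical, simplified from what an actual
 * switch_to() implementation would do): the two helpers bracket the
 * real context switch, with the restore deferred until the new task's
 * stack and registers are in place.
 */
static inline void example_context_switch_fpu(struct task_struct *prev,
					      struct task_struct *next,
					      int cpu)
{
	fpu_switch_t fpu;

	fpu = switch_fpu_prepare(prev, next, cpu);
	/* ... the actual register/stack switch would happen here ... */
	switch_fpu_finish(next, fpu);
}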
/*
 * Signal frame handlers...
 */
extern int save_i387_xstate(void __user *buf);
extern int restore_i387_xstate(void __user *buf);
static inline void __clear_fpu(struct task_struct *tsk)
{
	if (__thread_has_fpu(tsk)) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		__thread_fpu_end(tsk);
	}
}
/*
 * The actual user_fpu_begin/end() functions
 * need to be preemption-safe.
 *
 * NOTE! user_fpu_end() must be used only after you
 * have saved the FP state, and user_fpu_begin() must
 * be used only immediately before restoring it.
 * These functions do not do any save/restore on
 * their own.
 */
static inline void user_fpu_end(void)
{
	preempt_disable();
	__thread_fpu_end(current);
	preempt_enable();
}

static inline void user_fpu_begin(void)
{
	preempt_disable();
	if (!__thread_has_fpu(current))
		__thread_fpu_begin(current);
	preempt_enable();
}
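
/*
 * Illustrative sketch (hypothetical signal-save path; assumes current
 * owns the FPU): the ordering the NOTE above demands. Save the state
 * to memory first, only then give up ownership; the reverse order
 * would let another task grab the FPU before the state is written out.
 */
static inline void example_save_for_signal(void)
{
	preempt_disable();
	__save_init_fpu(current);	/* write the FP state to memory first */
	user_fpu_end();			/* ...only then drop FPU ownership */
	preempt_enable();
}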
/*
 * These disable preemption on their own and are therefore
 * safe to call from any context.
 */
static inline void save_init_fpu(struct task_struct *tsk)
{
	WARN_ON_ONCE(!__thread_has_fpu(tsk));
	preempt_disable();
	__save_init_fpu(tsk);
	__thread_fpu_end(tsk);
	preempt_enable();
}

static inline void clear_fpu(struct task_struct *tsk)
{
	preempt_disable();
	__clear_fpu(tsk);
	preempt_enable();
}
/*
 * i387 state interaction
 */
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.cwd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
	}
}

static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
	if (cpu_has_fxsr) {
		return tsk->thread.fpu.state->fxsave.swd;
	} else {
		return (unsigned short)tsk->thread.fpu.state->fsave.swd;
	}
}

static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
	if (cpu_has_xmm) {
		return tsk->thread.fpu.state->fxsave.mxcsr;
	} else {
		return MXCSR_DEFAULT;
	}
}
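
/*
 * Illustrative sketch (hypothetical helper; assumption: this mirrors
 * the check an x87 #MF handler performs elsewhere): the pending
 * exceptions that are actually unmasked are the status-word exception
 * bits (0-5) whose mask bits are clear in the control word.
 */
static inline unsigned short example_unmasked_exceptions(struct task_struct *tsk)
{
	return get_fpu_swd(tsk) & ~get_fpu_cwd(tsk) & 0x3f;
}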
static inline bool fpu_allocated(struct fpu *fpu)
{
	return fpu->state != NULL;
}
static inline int fpu_alloc(struct fpu *fpu)
{
	if (fpu_allocated(fpu))
		return 0;
	fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
	if (!fpu->state)
		return -ENOMEM;
	WARN_ON((unsigned long)fpu->state & 15);
	return 0;
}
static inline void fpu_free(struct fpu *fpu)
{
	if (fpu->state) {
		kmem_cache_free(task_xstate_cachep, fpu->state);
		fpu->state = NULL;
	}
}
static inline void fpu_copy(struct fpu *dst, struct fpu *src)
{
	memcpy(dst->state, src->state, xstate_size);
}
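
/*
 * Illustrative sketch (hypothetical helper, loosely modeled on what a
 * fork-time caller of these allocation helpers would do): allocate the
 * destination lazily, then duplicate the whole xstate image.
 */
static inline int example_dup_fpu(struct fpu *dst, struct fpu *src)
{
	int err = fpu_alloc(dst);

	if (err)
		return err;	/* -ENOMEM */
	fpu_copy(dst, src);
	return 0;
}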
extern void fpu_finit(struct fpu *fpu);

#endif /* _FPU_INTERNAL_H */