/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */
#ifndef _ASM_X86_FPU_INTERNAL_H
#define _ASM_X86_FPU_INTERNAL_H

#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/user.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xstate.h>
#include <asm/cpufeature.h>
#include <asm/trace/fpu.h>
/*
 * High level FPU state handling functions:
 */
extern void fpu__initialize(struct fpu *fpu);
extern void fpu__prepare_read(struct fpu *fpu);
extern void fpu__prepare_write(struct fpu *fpu);
extern void fpu__save(struct fpu *fpu);
extern void fpu__restore(struct fpu *fpu);
extern int fpu__restore_sig(void __user *buf, int ia32_frame);
extern void fpu__drop(struct fpu *fpu);
extern int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu);
extern void fpu__clear(struct fpu *fpu);
extern int fpu__exception_code(struct fpu *fpu, int trap_nr);
extern int dump_fpu(struct pt_regs *ptregs, struct user_i387_struct *fpstate);
/*
 * Boot time FPU initialization functions:
 */
extern void fpu__init_cpu(void);
extern void fpu__init_system_xstate(void);
extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system(struct cpuinfo_x86 *c);
extern void fpu__init_check_bugs(void);
extern void fpu__resume_cpu(void);
extern u64 fpu__get_supported_xfeatures_mask(void);
/*
 * Debugging facility:
 */
#ifdef CONFIG_X86_DEBUG_FPU
# define WARN_ON_FPU(x) WARN_ON_ONCE(x)
#else
# define WARN_ON_FPU(x) ({ (void)(x); 0; })
#endif
/*
 * FPU related CPU feature flag helper routines:
 */
static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has(X86_FEATURE_FXSR);
}
/*
 * fpstate handling functions:
 */

extern union fpregs_state init_fpstate;

extern void fpstate_init(union fpregs_state *state);
#ifdef CONFIG_MATH_EMULATION
extern void fpstate_init_soft(struct swregs_state *soft);
#else
static inline void fpstate_init_soft(struct swregs_state *soft) {}
#endif
static inline void fpstate_init_xstate(struct xregs_state *xsave)
{
	/*
	 * XRSTORS requires these bits set in xcomp_bv, or it will
	 * trigger #GP:
	 */
	xsave->header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT | xfeatures_mask;
}
static inline void fpstate_init_fxstate(struct fxregs_state *fx)
{
	fx->cwd = 0x37f;
	fx->mxcsr = MXCSR_DEFAULT;
}

extern void fpstate_sanitize_xstate(struct fpu *fpu);
#define user_insn(insn, output, input...)				\
({									\
	int err;							\
									\
	might_fault();							\
									\
	asm volatile(ASM_STAC "\n"					\
		     "1:" #insn "\n\t"					\
		     "2: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "3: movl $-1,%[err]\n"				\
		     "   jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})
#define kernel_insn(insn, output, input...)				\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_fprestore)	\
		     : output : input)
static inline int copy_fregs_to_user(struct fregs_state __user *fx)
{
	return user_insn(fnsave %[fx]; fwait, [fx] "=m" (*fx), "m" (*fx));
}
static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));

	return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));
}
static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		kernel_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else
		kernel_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}
static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));

	return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
}
static inline void copy_kernel_to_fregs(struct fregs_state *fx)
{
	kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}
static inline int copy_user_to_fregs(struct fregs_state __user *fx)
{
	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}
static inline void copy_fxregs_to_kernel(struct fpu *fpu)
{
	if (IS_ENABLED(CONFIG_X86_32))
		asm volatile("fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave));
	else
		asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
}
/* These macros all use (%edi)/(%rdi) as the single memory argument. */
#define XSAVE		".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT	".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVES		".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"
#define XSTATE_OP(op, st, lmask, hmask, err)				\
	asm volatile("1:" op "\n\t"					\
		     "xor %[err], %[err]\n"				\
		     "2:\n\t"						\
		     ".pushsection .fixup,\"ax\"\n\t"			\
		     "3: movl $-2,%[err]\n\t"				\
		     "jmp 2b\n\t"					\
		     ".popsection\n\t"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")
/*
 * If XSAVES is enabled, it replaces XSAVEOPT because it supports a compact
 * format and supervisor states in addition to modified optimization in
 * XSAVEOPT.
 *
 * Otherwise, if XSAVEOPT is enabled, XSAVEOPT replaces XSAVE because XSAVEOPT
 * supports modified optimization which is not supported by XSAVE.
 *
 * We use XSAVE as a fallback.
 *
 * The 661 label is defined in the ALTERNATIVE* macros as the address of the
 * original instruction which gets replaced. We need to use it here as the
 * address of the instruction that might fault.
 */
#define XSTATE_XSAVE(st, lmask, hmask, err)				\
	asm volatile(ALTERNATIVE_2(XSAVE,				\
				   XSAVEOPT, X86_FEATURE_XSAVEOPT,	\
				   XSAVES,   X86_FEATURE_XSAVES)	\
		     "\n"						\
		     "xor %[err], %[err]\n"				\
		     "3:\n"						\
		     ".pushsection .fixup,\"ax\"\n"			\
		     "4: movl $-2, %[err]\n"				\
		     "jmp 3b\n"						\
		     ".popsection\n"					\
		     _ASM_EXTABLE(661b, 4b)				\
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")
/*
 * Use XRSTORS to restore context if it is enabled. XRSTORS supports compact
 * XSAVE format and supervisor states in addition to modified optimization in
 * XRSTOR, if the system supports XSAVES.
 */
#define XSTATE_XRESTORE(st, lmask, hmask)				\
	asm volatile(ALTERNATIVE(XRSTOR,				\
				 XRSTORS, X86_FEATURE_XSAVES)		\
		     "\n"						\
		     "3:\n"						\
		     _ASM_EXTABLE_HANDLE(661b, 3b, ex_handler_fprestore)\
		     :							\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")
/*
 * This function is called only during boot time when x86 caps are not set
 * up and alternatives cannot be used yet.
 */
static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
{
	u64 mask = -1;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (static_cpu_has(X86_FEATURE_XSAVES))
		XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XSAVE, xstate, lmask, hmask, err);

	/* We should never fault when copying to a kernel buffer: */
	WARN_ON_FPU(err);
}
/*
 * This function is called only during boot time when x86 caps are not set
 * up and alternatives cannot be used yet.
 */
static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
{
	u64 mask = -1;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (static_cpu_has(X86_FEATURE_XSAVES))
		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

	/*
	 * We should never fault when copying from a kernel buffer, and the FPU
	 * state we set at boot time should be valid.
	 */
	WARN_ON_FPU(err);
}
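/*
 * Usage sketch (illustrative, not part of this header): early boot code
 * such as fpu__init_system_xstate() uses these *_booting() variants to
 * capture the init state, because the ALTERNATIVE-based XSTATE_XSAVE()
 * and XSTATE_XRESTORE() macros below cannot run before alternatives are
 * patched. Roughly:
 *
 *	copy_xregs_to_kernel_booting(&init_fpstate.xsave);
 */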
/*
 * Save processor xstate to xsave area.
 */
static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
{
	u64 mask = -1;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON_FPU(!alternatives_patched);

	XSTATE_XSAVE(xstate, lmask, hmask, err);

	/* We should never fault when copying to a kernel buffer: */
	WARN_ON_FPU(err);
}
/*
 * Restore processor xstate from xsave area.
 */
static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	XSTATE_XRESTORE(xstate, lmask, hmask);
}
/*
 * Save xstate to user space xsave area.
 *
 * We don't use modified optimization because xrstor/xrstors might track
 * a different application.
 *
 * We don't use the compacted format for the user space xsave area, for
 * backward compatibility with old applications which don't understand
 * the compacted format.
 */
static inline int copy_xregs_to_user(struct xregs_state __user *buf)
{
	int err;

	/*
	 * Clear the xsave header first, so that reserved fields are
	 * initialized to zero.
	 */
	err = __clear_user(&buf->header, sizeof(buf->header));
	if (unlikely(err))
		return -EFAULT;

	stac();
	XSTATE_OP(XSAVE, buf, -1, -1, err);
	clac();

	return err;
}
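/*
 * Usage sketch (illustrative): the signal code uses this helper to place
 * a standard-format XSAVE image on the user signal stack, roughly:
 *
 *	if (use_xsave())
 *		err = copy_xregs_to_user(buf_fx);
 *
 * where buf_fx points into the sigframe (see copy_fpstate_to_sigframe()).
 */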
/*
 * Restore xstate from user space xsave area.
 */
static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
{
	struct xregs_state *xstate = ((__force struct xregs_state *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	stac();
	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
	clac();

	return err;
}
/*
 * These must be called with preempt disabled. Returns
 * 'true' if the FPU state is still intact and we can
 * keep registers active.
 *
 * The legacy FNSAVE instruction clears all FPU state
 * unconditionally, so registers are essentially destroyed.
 * Modern FPU state can be kept in registers, if there are
 * no pending FP exceptions.
 */
static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
{
	if (likely(use_xsave())) {
		copy_xregs_to_kernel(&fpu->state.xsave);

		/*
		 * AVX512 state is tracked here because its use is
		 * known to slow the max clock speed of the core.
		 */
		if (fpu->state.xsave.header.xfeatures & XFEATURE_MASK_AVX512)
			fpu->avx512_timestamp = jiffies;
		return 1;
	}

	if (likely(use_fxsr())) {
		copy_fxregs_to_kernel(fpu);
		return 1;
	}

	/*
	 * Legacy FPU register saving, FNSAVE always clears FPU registers,
	 * so we have to mark them inactive:
	 */
	asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->state.fsave));

	return 0;
}
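/*
 * Usage sketch (illustrative, assuming preemption is already disabled):
 * the return value tells the caller whether the registers survived the
 * save, i.e. whether the in-register state may still be treated as valid:
 *
 *	if (copy_fpregs_to_fpstate(fpu))
 *		fpu->last_cpu = cpu;	(registers intact, still usable)
 *	else
 *		fpu->last_cpu = -1;	(FNSAVE path, registers destroyed)
 *
 * switch_fpu_prepare() below is the canonical caller of this pattern.
 */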
static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate, u64 mask)
{
	if (use_xsave()) {
		copy_kernel_to_xregs(&fpstate->xsave, mask);
	} else {
		if (use_fxsr())
			copy_kernel_to_fxregs(&fpstate->fxsave);
		else
			copy_kernel_to_fregs(&fpstate->fsave);
	}
}
static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
{
	/*
	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
	 * pending. Clear the x87 state here by setting it to fixed values.
	 * The fildl operand ("m") just needs to be some variable that is
	 * likely resident in L1 cache.
	 */
	if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
		asm volatile(
			"fnclex\n\t"
			"emms\n\t"
			"fildl %P[addr]"	/* set F?P to defined value */
			: : [addr] "m" (fpstate));
	}

	__copy_kernel_to_fpregs(fpstate, -1);
}
extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);

/*
 * FPU context switch related helper methods:
 */

DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
/*
 * The in-register FPU state for an FPU context on a CPU is assumed to be
 * valid if the fpu->last_cpu matches the CPU, and the fpu_fpregs_owner_ctx
 * per-CPU variable points to it.
 *
 * If the FPU register state is valid, the kernel can skip restoring the
 * FPU state from memory.
 *
 * Any code that clobbers the FPU registers or updates the in-memory
 * FPU state for a task MUST let the rest of the kernel know that the
 * FPU registers are no longer valid for this task.
 *
 * Either one of these invalidation functions is enough. Invalidate
 * a resource you control: the CPU if you are using it for something
 * else (with preemption disabled), the FPU of the current task, or the
 * FPU of a task that is prevented from running by the current task.
 */
static inline void __cpu_invalidate_fpregs_state(void)
{
	__this_cpu_write(fpu_fpregs_owner_ctx, NULL);
}
static inline void __fpu_invalidate_fpregs_state(struct fpu *fpu)
{
	fpu->last_cpu = -1;
}
static inline int fpregs_state_valid(struct fpu *fpu, unsigned int cpu)
{
	return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
}
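/*
 * Invalidation sketch (illustrative): a kernel_fpu_begin()-style user
 * that wants the FPU registers for kernel work invalidates the resource
 * it controls, per the comment above, roughly:
 *
 *	preempt_disable();
 *	if (current->thread.fpu.initialized)
 *		copy_fpregs_to_fpstate(&current->thread.fpu);
 *	else
 *		__cpu_invalidate_fpregs_state();
 *	(the FPU registers may now be clobbered by kernel code)
 *
 * In the initialized case the state is saved and restored again before
 * preemption is re-enabled, so fpu_fpregs_owner_ctx stays valid.
 */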
/*
 * These generally need preemption protection to work,
 * so try to avoid using them on their own:
 */
static inline void fpregs_deactivate(struct fpu *fpu)
{
	this_cpu_write(fpu_fpregs_owner_ctx, NULL);
	trace_x86_fpu_regs_deactivated(fpu);
}

static inline void fpregs_activate(struct fpu *fpu)
{
	this_cpu_write(fpu_fpregs_owner_ctx, fpu);
	trace_x86_fpu_regs_activated(fpu);
}
/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state.
 *    This is done within the context of the old process.
 *
 *  - switch_fpu_finish() restores the new state as
 *    necessary.
 */
static inline void
switch_fpu_prepare(struct fpu *old_fpu, int cpu)
{
	if (static_cpu_has(X86_FEATURE_FPU) && old_fpu->initialized) {
		if (!copy_fpregs_to_fpstate(old_fpu))
			old_fpu->last_cpu = -1;
		else
			old_fpu->last_cpu = cpu;

		/* But leave fpu_fpregs_owner_ctx! */
		trace_x86_fpu_regs_deactivated(old_fpu);
	} else
		old_fpu->last_cpu = -1;
}
/*
 * Misc helper functions:
 */

/*
 * Set up the userspace FPU context for the new task, if the task
 * has used the FPU.
 */
static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
{
	bool preload = static_cpu_has(X86_FEATURE_FPU) &&
		       new_fpu->initialized;

	if (preload) {
		if (!fpregs_state_valid(new_fpu, cpu))
			copy_kernel_to_fpregs(&new_fpu->state);
		fpregs_activate(new_fpu);
	}
}
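/*
 * Usage sketch (illustrative): a __switch_to()-style caller runs the two
 * stages described above around the actual context switch:
 *
 *	switch_fpu_prepare(&prev->thread.fpu, cpu);
 *	(... switch stacks, segments and other thread state ...)
 *	switch_fpu_finish(&next->thread.fpu, cpu);
 */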
/*
 * Needs to be preemption-safe.
 *
 * NOTE! user_fpu_begin() must be used only immediately before restoring
 * the save state. It does not do any saving/restoring on its own. In
 * lazy FPU mode, it is just an optimization to avoid a #NM exception;
 * the task can lose the FPU right after preempt_enable().
 */
static inline void user_fpu_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	preempt_disable();
	fpregs_activate(fpu);
	preempt_enable();
}
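/*
 * Usage sketch (illustrative): a sigreturn-style path activates the
 * registers immediately before restoring the saved user state, e.g.:
 *
 *	user_fpu_begin();
 *	if (copy_user_to_fxregs(buf_fx))
 *		return -EFAULT;
 *
 * where buf_fx is the user-space save area (a hypothetical local here).
 */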
/*
 * MXCSR and XCR definitions:
 */

extern unsigned int mxcsr_feature_mask;

#define XCR_XFEATURE_ENABLED_MASK	0x00000000
static inline u64 xgetbv(u32 index)
{
	u32 eax, edx;

	asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */
		     : "=a" (eax), "=d" (edx)
		     : "c" (index));
	return eax + ((u64)edx << 32);
}
static inline void xsetbv(u32 index, u64 value)
{
	u32 eax = value;
	u32 edx = value >> 32;

	asm volatile(".byte 0x0f,0x01,0xd1" /* xsetbv */
		     : : "a" (eax), "d" (edx), "c" (index));
}
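/*
 * Usage sketch (illustrative): XCR0 holds the enabled-xfeatures mask, so
 * init code such as fpu__init_cpu_xstate() programs it via these helpers,
 * roughly:
 *
 *	u64 xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
 *	xsetbv(XCR_XFEATURE_ENABLED_MASK, xcr0 | XFEATURE_MASK_SSE);
 *
 * XSETBV is a privileged instruction and #GPs on reserved bits, so only
 * boot/resume code writes XCR0.
 */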
#endif /* _ASM_X86_FPU_INTERNAL_H */