/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _ASM_X86_FPU_INTERNAL_H
#define _ASM_X86_FPU_INTERNAL_H

#include <linux/compat.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/user.h>
#include <asm/fpu/api.h>
#include <asm/fpu/xstate.h>
#include <asm/cpufeature.h>
#include <asm/trace/fpu.h>

/*
 * High level FPU state handling functions:
 */
extern void fpu__activate_curr(struct fpu *fpu);
extern void fpu__activate_fpstate_read(struct fpu *fpu);
extern void fpu__activate_fpstate_write(struct fpu *fpu);
extern void fpu__current_fpstate_write_begin(void);
extern void fpu__current_fpstate_write_end(void);
extern void fpu__save(struct fpu *fpu);
extern void fpu__restore(struct fpu *fpu);
extern int fpu__restore_sig(void __user *buf, int ia32_frame);
extern void fpu__drop(struct fpu *fpu);
extern int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu);
extern void fpu__clear(struct fpu *fpu);
extern int fpu__exception_code(struct fpu *fpu, int trap_nr);
extern int dump_fpu(struct pt_regs *ptregs, struct user_i387_struct *fpstate);

/*
 * Boot time FPU initialization functions:
 */
extern void fpu__init_cpu(void);
extern void fpu__init_system_xstate(void);
extern void fpu__init_cpu_xstate(void);
extern void fpu__init_system(struct cpuinfo_x86 *c);
extern void fpu__init_check_bugs(void);
extern void fpu__resume_cpu(void);
extern u64 fpu__get_supported_xfeatures_mask(void);

/*
 * Debugging facility:
 */
#ifdef CONFIG_X86_DEBUG_FPU
# define WARN_ON_FPU(x) WARN_ON_ONCE(x)
#else
# define WARN_ON_FPU(x) ({ (void)(x); 0; })
#endif
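/* Note: both variants evaluate 'x' exactly once; the non-debug variant always yields 0. */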

/*
 * FPU related CPU feature flag helper routines:
 */
static __always_inline __pure bool use_eager_fpu(void)
{
	return static_cpu_has(X86_FEATURE_EAGER_FPU);
}

static __always_inline __pure bool use_xsaveopt(void)
{
	return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
	return static_cpu_has(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
	return static_cpu_has(X86_FEATURE_FXSR);
}

/*
 * fpstate handling functions:
 */

extern union fpregs_state init_fpstate;

extern void fpstate_init(union fpregs_state *state);
#ifdef CONFIG_MATH_EMULATION
extern void fpstate_init_soft(struct swregs_state *soft);
#else
static inline void fpstate_init_soft(struct swregs_state *soft) {}
#endif
static inline void fpstate_init_fxstate(struct fxregs_state *fx)
{
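	/*
	 * 0x37f is the default x87 control word set by FNINIT: all
	 * exceptions masked, extended precision, round-to-nearest.
	 */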
	fx->cwd = 0x37f;
	fx->mxcsr = MXCSR_DEFAULT;
}
extern void fpstate_sanitize_xstate(struct fpu *fpu);

#define user_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile(ASM_STAC "\n"					\
		     "1:" #insn "\n\t"					\
		     "2: " ASM_CLAC "\n"				\
		     ".section .fixup,\"ax\"\n"				\
		     "3:  movl $-1,%[err]\n"				\
		     "    jmp  2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

#define check_insn(insn, output, input...)				\
({									\
	int err;							\
	asm volatile("1:" #insn "\n\t"					\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3:  movl $-1,%[err]\n"				\
		     "    jmp  2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err), output				\
		     : "0"(0), input);					\
	err;								\
})

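/*
 * Both macros evaluate to 0 on success and to -1 if the wrapped
 * instruction faults (the exception-table fixup catches the fault).
 * A minimal, illustrative use:
 *
 *	if (user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx)))
 *		return -EFAULT;
 */
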
static inline int copy_fregs_to_user(struct fregs_state __user *fx)
{
	return user_insn(fnsave %[fx]; fwait, [fx] "=m" (*fx), "m" (*fx));
}

static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
	else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
		return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));

	/* See comment in copy_fxregs_to_kernel() below. */
	return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
}

static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
{
	int err;

	if (IS_ENABLED(CONFIG_X86_32)) {
		err = check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	} else {
		if (IS_ENABLED(CONFIG_AS_FXSAVEQ)) {
			err = check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
		} else {
			/* See comment in copy_fxregs_to_kernel() below. */
			err = check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx));
		}
	}
	/* Copying from a kernel buffer to FPU registers should never fail: */
	WARN_ON_FPU(err);
}

static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
{
	if (IS_ENABLED(CONFIG_X86_32))
		return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
	else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
		return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));

	/* See comment in copy_fxregs_to_kernel() below. */
	return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
			 "m" (*fx));
}

static inline void copy_kernel_to_fregs(struct fregs_state *fx)
{
	int err = check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));

	WARN_ON_FPU(err);
}

static inline int copy_user_to_fregs(struct fregs_state __user *fx)
{
	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}

static inline void copy_fxregs_to_kernel(struct fpu *fpu)
{
	if (IS_ENABLED(CONFIG_X86_32))
		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave));
	else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
		asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
	else {
		/* Using "rex64; fxsave %0" is broken because, if the memory
		 * operand uses any extended registers for addressing, a second
		 * REX prefix will be generated (to the assembler, rex64
		 * followed by semicolon is a separate instruction), and hence
		 * the 64-bitness is lost.
		 *
		 * Using "fxsaveq %0" would be the ideal choice, but is only
		 * supported starting with gas 2.16.
		 *
		 * Using, as a workaround, the properly prefixed form below
		 * isn't accepted by any binutils version so far released,
		 * complaining that the same type of prefix is used twice if
		 * an extended register is needed for addressing (fix submitted
		 * to mainline 2005-11-21).
		 *
		 *  asm volatile("rex64/fxsave %0" : "=m" (fpu->state.fxsave));
		 *
		 * This, however, we can work around by forcing the compiler to
		 * select an addressing mode that doesn't require extended
		 * registers.
		 */
		asm volatile( "rex64/fxsave (%[fx])"
			     : "=m" (fpu->state.fxsave)
			     : [fx] "R" (&fpu->state.fxsave));
	}
}

/* These macros all use (%edi)/(%rdi) as the single memory argument. */
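/*
 * The opcodes are hand-encoded as .byte sequences so that assemblers
 * which predate the XSAVE instruction family can still build this file.
 */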
#define XSAVE		".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT	".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVES		".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"

#define XSTATE_OP(op, st, lmask, hmask, err)				\
	asm volatile("1:" op "\n\t"					\
		     "xor %[err], %[err]\n"				\
		     "2:\n\t"						\
		     ".pushsection .fixup,\"ax\"\n\t"			\
		     "3: movl $-2,%[err]\n\t"				\
		     "jmp 2b\n\t"					\
		     ".popsection\n\t"					\
		     _ASM_EXTABLE(1b, 3b)				\
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

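/*
 * Illustrative use, matching the *_booting() helpers further below;
 * 'err' is left at 0 on success and set to -2 if the instruction faults:
 *
 *	XSTATE_OP(XSAVE, xstate, lmask, hmask, err);
 */
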
/*
 * If XSAVES is enabled, it replaces XSAVEOPT because it supports the compact
 * format and supervisor states, in addition to the modified optimization of
 * XSAVEOPT.
 *
 * Otherwise, if XSAVEOPT is enabled, XSAVEOPT replaces XSAVE because XSAVEOPT
 * supports the modified optimization, which XSAVE does not.
 *
 * We use XSAVE as a fallback.
 *
 * The 661 label is defined in the ALTERNATIVE* macros as the address of the
 * original instruction which gets replaced. We need to use it here as the
 * address of the instruction at which we might get an exception.
 */
#define XSTATE_XSAVE(st, lmask, hmask, err)				\
	asm volatile(ALTERNATIVE_2(XSAVE,				\
				   XSAVEOPT, X86_FEATURE_XSAVEOPT,	\
				   XSAVES,   X86_FEATURE_XSAVES)	\
		     "\n"						\
		     "xor %[err], %[err]\n"				\
		     "3:\n"						\
		     ".pushsection .fixup,\"ax\"\n"			\
		     "4: movl $-2, %[err]\n"				\
		     "jmp 3b\n"						\
		     ".popsection\n"					\
		     _ASM_EXTABLE(661b, 4b)				\
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * Use XRSTORS to restore context if it is enabled. XRSTORS supports the
 * compact XSAVE area format.
 */
#define XSTATE_XRESTORE(st, lmask, hmask, err)				\
	asm volatile(ALTERNATIVE(XRSTOR,				\
				 XRSTORS, X86_FEATURE_XSAVES)		\
		     "\n"						\
		     "xor %[err], %[err]\n"				\
		     "3:\n"						\
		     ".pushsection .fixup,\"ax\"\n"			\
		     "4: movl $-2, %[err]\n"				\
		     "jmp 3b\n"						\
		     ".popsection\n"					\
		     _ASM_EXTABLE(661b, 4b)				\
		     : [err] "=r" (err)					\
		     : "D" (st), "m" (*st), "a" (lmask), "d" (hmask)	\
		     : "memory")

/*
 * This function is called only during boot time when x86 caps are not set
 * up and alternatives cannot be used yet.
 */
static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
{
	u64 mask = -1;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (static_cpu_has(X86_FEATURE_XSAVES))
		XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XSAVE, xstate, lmask, hmask, err);

	/* We should never fault when copying to a kernel buffer: */
	WARN_ON_FPU(err);
}

/*
 * This function is called only during boot time when x86 caps are not set
 * up and alternatives cannot be used yet.
 */
static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
{
	u64 mask = -1;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (static_cpu_has(X86_FEATURE_XSAVES))
		XSTATE_OP(XRSTORS, xstate, lmask, hmask, err);
	else
		XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);

	/* We should never fault when copying from a kernel buffer: */
	WARN_ON_FPU(err);
}

/*
 * Save processor xstate to xsave area.
 */
static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
{
	u64 mask = -1;
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	WARN_ON(!alternatives_patched);

	XSTATE_XSAVE(xstate, lmask, hmask, err);

	/* We should never fault when copying to a kernel buffer: */
	WARN_ON_FPU(err);
}

/*
 * Restore processor xstate from xsave area.
 */
static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	XSTATE_XRESTORE(xstate, lmask, hmask, err);

	/* We should never fault when copying from a kernel buffer: */
	WARN_ON_FPU(err);
}

/*
 * Save xstate to user space xsave area.
 *
 * We don't use the modified optimization, because xrstor/xrstors might
 * track a different application.
 *
 * We don't use the compacted format xsave area, for backward compatibility
 * with old applications which don't understand the compacted format.
 */
static inline int copy_xregs_to_user(struct xregs_state __user *buf)
{
	int err;

	/*
	 * Clear the xsave header first, so that reserved fields are
	 * initialized to zero.
	 */
	err = __clear_user(&buf->header, sizeof(buf->header));
	if (unlikely(err))
		return -EFAULT;

	stac();
	XSTATE_OP(XSAVE, buf, -1, -1, err);
	clac();

	return err;
}

/*
 * Restore xstate from user space xsave area.
 */
static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
{
	struct xregs_state *xstate = ((__force struct xregs_state *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err;

	stac();
	XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
	clac();

	return err;
}

/*
 * Must be called with preemption disabled. Returns
 * 'true' if the FPU state is still intact and we can
 * keep registers active.
 *
 * The legacy FNSAVE instruction clears all FPU state
 * unconditionally, so registers are essentially destroyed.
 * Modern FPU state can be kept in registers, if there are
 * no pending FP exceptions.
 */
static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
{
	if (likely(use_xsave())) {
		copy_xregs_to_kernel(&fpu->state.xsave);
		return 1;
	}

	if (likely(use_fxsr())) {
		copy_fxregs_to_kernel(fpu);
		return 1;
	}

	/*
	 * Legacy FPU register saving, FNSAVE always clears FPU registers,
	 * so we have to mark them inactive:
	 */
	asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->state.fsave));

	return 0;
}

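/*
 * A minimal sketch of the intended calling pattern (mirrored by
 * switch_fpu_prepare() below): a 0 return means the register contents
 * were destroyed, so any cached per-CPU copy must be invalidated:
 *
 *	if (copy_fpregs_to_fpstate(fpu))
 *		fpu->last_cpu = cpu;
 *	else
 *		fpu->last_cpu = -1;
 */
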
static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate)
{
	if (use_xsave()) {
		copy_kernel_to_xregs(&fpstate->xsave, -1);
	} else {
		if (use_fxsr())
			copy_kernel_to_fxregs(&fpstate->fxsave);
		else
			copy_kernel_to_fregs(&fpstate->fsave);
	}
}

static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
{
	/*
	 * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
	 * pending. Clear the x87 state here by setting it to fixed values.
	 * The [addr] operand just needs to name some variable that is likely
	 * to be in the L1 cache.
	 */
	if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
		asm volatile(
			"fnclex\n\t"
			"emms\n\t"
			"fildl %P[addr]"	/* set F?P to defined value */
			: : [addr] "m" (fpstate));
	}

	__copy_kernel_to_fpregs(fpstate);
}

extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);

/*
 * FPU context switch related helper methods:
 */

DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

/*
 * Must be run with preemption disabled: this clears the fpu_fpregs_owner_ctx
 * on this CPU.
 *
 * This will disable any lazy FPU state restore of the current FPU state,
 * but if the current thread owns the FPU, it will still be saved by the
 * context switch code.
 */
static inline void __cpu_disable_lazy_restore(unsigned int cpu)
{
	per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
}

static inline int fpu_want_lazy_restore(struct fpu *fpu, unsigned int cpu)
{
	return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
}


/*
 * Wrap lazy FPU TS handling in a 'hw fpregs activation/deactivation'
 * idiom, which is then paired with the sw-flag (fpregs_active) later on:
 */

static inline void __fpregs_activate_hw(void)
{
	if (!use_eager_fpu())
		clts();
}

static inline void __fpregs_deactivate_hw(void)
{
	if (!use_eager_fpu())
		stts();
}

/* Must be paired with an 'stts' (__fpregs_deactivate_hw()) after! */
static inline void __fpregs_deactivate(struct fpu *fpu)
{
	WARN_ON_FPU(!fpu->fpregs_active);

	fpu->fpregs_active = 0;
	this_cpu_write(fpu_fpregs_owner_ctx, NULL);
	trace_x86_fpu_regs_deactivated(fpu);
}

/* Must be paired with a 'clts' (__fpregs_activate_hw()) before! */
static inline void __fpregs_activate(struct fpu *fpu)
{
	WARN_ON_FPU(fpu->fpregs_active);

	fpu->fpregs_active = 1;
	this_cpu_write(fpu_fpregs_owner_ctx, fpu);
	trace_x86_fpu_regs_activated(fpu);
}

/*
 * The question "does this thread have fpu access?"
 * is slightly racy, since preemption could come in
 * and revoke it immediately after the test.
 *
 * However, even in that very unlikely scenario,
 * we can just assume we have FPU access - typically
 * to save the FP state - we'll just take a #NM
 * fault and get the FPU access back.
 */
static inline int fpregs_active(void)
{
	return current->thread.fpu.fpregs_active;
}

/*
 * Encapsulate the CR0.TS handling together with the
 * software flag.
 *
 * These generally need preemption protection to work,
 * so try to avoid using them on their own.
 */
static inline void fpregs_activate(struct fpu *fpu)
{
	__fpregs_activate_hw();
	__fpregs_activate(fpu);
}

static inline void fpregs_deactivate(struct fpu *fpu)
{
	__fpregs_deactivate(fpu);
	__fpregs_deactivate_hw();
}

/*
 * FPU state switching for scheduling.
 *
 * This is a two-stage process:
 *
 *  - switch_fpu_prepare() saves the old state and
 *    sets the new state of the CR0.TS bit. This is
 *    done within the context of the old process.
 *
 *  - switch_fpu_finish() restores the new state as
 *    necessary.
 */
typedef struct { int preload; } fpu_switch_t;

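/*
 * A minimal, illustrative sketch of how the two stages pair up in a
 * context-switch path such as __switch_to() ('prev', 'next' and 'cpu'
 * are assumed locals, not defined in this header):
 *
 *	fpu_switch_t fpu_switch;
 *
 *	fpu_switch = switch_fpu_prepare(&prev->thread.fpu,
 *					&next->thread.fpu, cpu);
 *	...			(switch stacks, segments, etc.)
 *	switch_fpu_finish(&next->thread.fpu, fpu_switch);
 */
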
static inline fpu_switch_t
switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
{
	fpu_switch_t fpu;

	/*
	 * If the task has used the FPU, pre-load its state: always on
	 * eager-FPU (xsave) processors, or, in lazy mode, if the past 5
	 * consecutive context switches used the FPU.
	 */
	fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
		      new_fpu->fpstate_active &&
		      (use_eager_fpu() || new_fpu->counter > 5);

	if (old_fpu->fpregs_active) {
		if (!copy_fpregs_to_fpstate(old_fpu))
			old_fpu->last_cpu = -1;
		else
			old_fpu->last_cpu = cpu;

		/* But leave fpu_fpregs_owner_ctx! */
		old_fpu->fpregs_active = 0;
		trace_x86_fpu_regs_deactivated(old_fpu);

		/* Don't change CR0.TS if we just switch! */
		if (fpu.preload) {
			new_fpu->counter++;
			__fpregs_activate(new_fpu);
			trace_x86_fpu_regs_activated(new_fpu);
			prefetch(&new_fpu->state);
		} else {
			__fpregs_deactivate_hw();
		}
	} else {
		old_fpu->counter = 0;
		old_fpu->last_cpu = -1;
		if (fpu.preload) {
			new_fpu->counter++;
			if (fpu_want_lazy_restore(new_fpu, cpu))
				fpu.preload = 0;
			else
				prefetch(&new_fpu->state);
			fpregs_activate(new_fpu);
		}
	}
	return fpu;
}

/*
 * Misc helper functions:
 */

/*
 * By the time this gets called, we've already cleared CR0.TS and
 * given the process the FPU if we are going to preload the FPU
 * state - all we need to do is to conditionally restore the register
 * state itself.
 */
static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
{
	if (fpu_switch.preload)
		copy_kernel_to_fpregs(&new_fpu->state);
}

/*
 * Needs to be preemption-safe.
 *
 * NOTE! user_fpu_begin() must be used only immediately before restoring
 * the saved state. It does not do any saving/restoring on its own. In
 * lazy FPU mode, it is just an optimization to avoid a #NM exception:
 * the task can lose the FPU right after preempt_enable().
 */
static inline void user_fpu_begin(void)
{
	struct fpu *fpu = &current->thread.fpu;

	preempt_disable();
	if (!fpregs_active())
		fpregs_activate(fpu);
	preempt_enable();
}

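/*
 * A minimal usage sketch ('buf' is an assumed user-space pointer, not
 * defined in this header): activate the registers, then immediately
 * restore into them:
 *
 *	user_fpu_begin();
 *	if (copy_user_to_fxregs(buf))
 *		return -EFAULT;
 */
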
/*
 * MXCSR and XCR definitions:
 */

extern unsigned int mxcsr_feature_mask;

#define XCR_XFEATURE_ENABLED_MASK	0x00000000

static inline u64 xgetbv(u32 index)
{
	u32 eax, edx;

	asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */
		     : "=a" (eax), "=d" (edx)
		     : "c" (index));
	return eax + ((u64)edx << 32);
}

static inline void xsetbv(u32 index, u64 value)
{
	u32 eax = value;
	u32 edx = value >> 32;

	asm volatile(".byte 0x0f,0x01,0xd1" /* xsetbv */
		     : : "a" (eax), "d" (edx), "c" (index));
}
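
/*
 * A minimal usage sketch, mirroring what the xstate init code does at
 * boot ('new_mask' is a hypothetical xfeature mask, not defined here):
 *
 *	u64 old_mask = xgetbv(XCR_XFEATURE_ENABLED_MASK);
 *	xsetbv(XCR_XFEATURE_ENABLED_MASK, old_mask | new_mask);
 */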

#endif /* _ASM_X86_FPU_INTERNAL_H */