Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /* |
1da177e4 LT | 2 | * Copyright (C) 1994 Linus Torvalds |
| 3 | * |
| 4 | * Pentium III FXSR, SSE support |
| 5 | * General FPU state handling cleanups |
| 6 | * Gareth Hughes <gareth@valinux.com>, May 2000 |
| 7 | */ |
78f7f1e5 | 8 | #include <asm/fpu/internal.h> |
59a36d16 | 9 | #include <asm/fpu/regset.h> |
fcbc99c4 | 10 | #include <asm/fpu/signal.h> |
e1cebad4 | 11 | #include <asm/traps.h> |
fcbc99c4 | 12 | |
91066588 | 13 | #include <linux/hardirq.h> |
1da177e4 | 14 | |
6f575023 IM | 15 | /* |
| 16 | * Represents the initial FPU state. It's mostly (but not completely) zeroes, |
| 17 | * depending on the FPU hardware format: |
| 18 | */ |
c47ada30 | 19 | union fpregs_state init_fpstate __read_mostly; |
6f575023 | 20 | |
085cc281 IM | 21 | /* |
| 22 | * Track whether the kernel is using the FPU state |
| 23 | * currently. |
| 24 | * |
| 25 | * This flag is used: |
| 26 | * |
| 27 | * - by IRQ context code to potentially use the FPU |
| 28 | * if it's unused. |
| 29 | * |
| 30 | * - to debug kernel_fpu_begin()/end() correctness |
| 31 | */ |
14e153ef ON | 32 | static DEFINE_PER_CPU(bool, in_kernel_fpu); |
| 33 | |
b0c050c5 | 34 | /* |
36b544dc | 35 | * Track which context is using the FPU on the CPU: |
b0c050c5 | 36 | */ |
36b544dc | 37 | DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx); |
b0c050c5 | 38 | |
416d49ac | 39 | static void kernel_fpu_disable(void) |
7575637a ON | 40 | { |
| 41 | WARN_ON(this_cpu_read(in_kernel_fpu)); |
| 42 | this_cpu_write(in_kernel_fpu, true); |
| 43 | } |
| 44 | |
416d49ac | 45 | static void kernel_fpu_enable(void) |
7575637a | 46 | { |
3103ae3a | 47 | WARN_ON_ONCE(!this_cpu_read(in_kernel_fpu)); |
7575637a ON | 48 | this_cpu_write(in_kernel_fpu, false); |
| 49 | } |
| 50 | |
085cc281 IM | 51 | static bool kernel_fpu_disabled(void) |
| 52 | { |
| 53 | return this_cpu_read(in_kernel_fpu); |
| 54 | } |
| 55 | |
8546c008 LT | 56 | /* |
| 57 | * Were we in an interrupt that interrupted kernel mode? |
| 58 | * |
304bceda | 59 | * In the lazy-FPU case, we can do a kernel_fpu_begin/end() pair *ONLY* if that |
8546c008 LT | 60 | * pair does nothing at all: the thread must not have fpu (so |
| 61 | * that we don't try to save the FPU state), and TS must |
| 62 | * be set (so that the clts/stts pair does nothing that is |
| 63 | * visible in the interrupted kernel thread). |
5187b28f | 64 | * |
4b2e762e ON | 65 | * Except for the eagerfpu case when we return true; in the likely case |
| 66 | * the thread has FPU but we are not going to set/clear TS. |
8546c008 | 67 | */ |
416d49ac | 68 | static bool interrupted_kernel_fpu_idle(void) |
8546c008 | 69 | { |
085cc281 | 70 | if (kernel_fpu_disabled()) |
14e153ef ON | 71 | return false; |
| 72 | |
5d2bd700 | 73 | if (use_eager_fpu()) |
4b2e762e | 74 | return true; |
304bceda | 75 | |
d5cea9b0 | 76 | return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS); |
8546c008 LT | 77 | } |
| 78 | |
| 79 | /* |
| 80 | * Were we in user mode (or vm86 mode) when we were |
| 81 | * interrupted? |
| 82 | * |
| 83 | * Doing kernel_fpu_begin/end() is ok if we are running |
| 84 | * in an interrupt context from user mode - we'll just |
| 85 | * save the FPU state as required. |
| 86 | */ |
416d49ac | 87 | static bool interrupted_user_mode(void) |
8546c008 LT | 88 | { |
| 89 | struct pt_regs *regs = get_irq_regs(); |
f39b6f0e | 90 | return regs && user_mode(regs); |
8546c008 LT | 91 | } |
| 92 | |
| 93 | /* |
| 94 | * Can we use the FPU in kernel mode with the |
| 95 | * whole "kernel_fpu_begin/end()" sequence? |
| 96 | * |
| 97 | * It's always ok in process context (ie "not interrupt") |
| 98 | * but it is sometimes ok even from an irq. |
| 99 | */ |
| 100 | bool irq_fpu_usable(void) |
| 101 | { |
| 102 | return !in_interrupt() || |
| 103 | interrupted_user_mode() || |
| 104 | interrupted_kernel_fpu_idle(); |
| 105 | } |
| 106 | EXPORT_SYMBOL(irq_fpu_usable); |
| 107 | |
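As a usage illustration of the rule above, here is a minimal sketch of how driver code might guard optional SIMD work with irq_fpu_usable(); the helper name xor_block_simd() and the fallback loop are illustrative assumptions, not part of this file:

```c
#include <linux/types.h>
#include <asm/fpu/api.h>	/* irq_fpu_usable(), kernel_fpu_begin/end() */

/* Hypothetical helper: XOR two buffers, using SIMD only when permitted. */
static void xor_block_simd(u8 *dst, const u8 *src, size_t len)
{
	size_t i;

	if (!irq_fpu_usable()) {
		/* Unsafe interrupt context: fall back to integer code. */
		for (i = 0; i < len; i++)
			dst[i] ^= src[i];
		return;
	}

	kernel_fpu_begin();
	/*
	 * FPU/SIMD instructions are only legal inside this section; a
	 * real driver would run an SSE/AVX loop here. The plain loop
	 * below just keeps this sketch functionally correct.
	 */
	for (i = 0; i < len; i++)
		dst[i] ^= src[i];
	kernel_fpu_end();
}
```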
b1a74bf8 | 108 | void __kernel_fpu_begin(void) |
8546c008 | 109 | { |
36b544dc | 110 | struct fpu *fpu = &current->thread.fpu; |
8546c008 | 111 | |
63c6680c IM | 112 | WARN_ON_ONCE(!irq_fpu_usable()); |
| 113 | |
3103ae3a | 114 | kernel_fpu_disable(); |
14e153ef | 115 | |
d5cea9b0 | 116 | if (fpu->fpregs_active) { |
4f836347 | 117 | copy_fpregs_to_fpstate(fpu); |
7aeccb83 | 118 | } else { |
36b544dc | 119 | this_cpu_write(fpu_fpregs_owner_ctx, NULL); |
32b49b3c | 120 | __fpregs_activate_hw(); |
8546c008 LT | 121 | } |
| 122 | } |
b1a74bf8 | 123 | EXPORT_SYMBOL(__kernel_fpu_begin); |
8546c008 | 124 | |
b1a74bf8 | 125 | void __kernel_fpu_end(void) |
8546c008 | 126 | { |
af2d94fd | 127 | struct fpu *fpu = &current->thread.fpu; |
33a3ebdc | 128 | |
d5cea9b0 | 129 | if (fpu->fpregs_active) { |
0e75c54f | 130 | if (WARN_ON(copy_fpstate_to_fpregs(fpu))) |
fbce7782 | 131 | fpu__clear(fpu); |
32b49b3c IM | 132 | } else { |
| 133 | __fpregs_deactivate_hw(); |
731bd6a9 | 134 | } |
14e153ef | 135 | |
3103ae3a | 136 | kernel_fpu_enable(); |
8546c008 | 137 | } |
b1a74bf8 | 138 | EXPORT_SYMBOL(__kernel_fpu_end); |
8546c008 | 139 | |
d63e79b1 IM | 140 | void kernel_fpu_begin(void) |
| 141 | { |
| 142 | preempt_disable(); |
| 143 | __kernel_fpu_begin(); |
| 144 | } |
| 145 | EXPORT_SYMBOL_GPL(kernel_fpu_begin); |
| 146 | |
| 147 | void kernel_fpu_end(void) |
| 148 | { |
| 149 | __kernel_fpu_end(); |
| 150 | preempt_enable(); |
| 151 | } |
| 152 | EXPORT_SYMBOL_GPL(kernel_fpu_end); |
| 153 | |
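The underscore-prefixed variants above exist for callers that manage preemption themselves; a sketch of the two valid pairings (the function names are hypothetical):

```c
#include <linux/preempt.h>
#include <asm/fpu/api.h>

/* Common case: the wrappers handle preemption. */
static void fpu_section_simple(void)
{
	kernel_fpu_begin();	/* preempt_disable() + __kernel_fpu_begin() */
	/* ... FPU/SIMD work; must not sleep or nest another section ... */
	kernel_fpu_end();	/* __kernel_fpu_end() + preempt_enable() */
}

/* Caller that already runs with preemption disabled for its own reasons. */
static void fpu_section_preempt_managed(void)
{
	preempt_disable();
	__kernel_fpu_begin();
	/* ... FPU/SIMD work ... */
	__kernel_fpu_end();
	preempt_enable();
}
```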
91066588 IM | 154 | /* |
| 155 | * CR0::TS save/restore functions: |
| 156 | */ |
| 157 | int irq_ts_save(void) |
| 158 | { |
| 159 | /* |
| 160 | * If in process context and not atomic, we can take a spurious DNA fault. |
| 161 | * Otherwise, doing clts() in process context requires disabling preemption |
| 162 | * or some heavy lifting like kernel_fpu_begin() |
| 163 | */ |
| 164 | if (!in_atomic()) |
| 165 | return 0; |
| 166 | |
| 167 | if (read_cr0() & X86_CR0_TS) { |
| 168 | clts(); |
| 169 | return 1; |
| 170 | } |
| 171 | |
| 172 | return 0; |
| 173 | } |
| 174 | EXPORT_SYMBOL_GPL(irq_ts_save); |
| 175 | |
| 176 | void irq_ts_restore(int TS_state) |
| 177 | { |
| 178 | if (TS_state) |
| 179 | stts(); |
| 180 | } |
| 181 | EXPORT_SYMBOL_GPL(irq_ts_restore); |
| 182 | |
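These helpers serve drivers that execute a single FPU-touching instruction and want to avoid a device-not-available fault without paying for a full kernel_fpu_begin()/end() section. A sketch of the pattern (the actual instruction is elided):

```c
#include <asm/fpu/api.h>	/* irq_ts_save(), irq_ts_restore() */

/* Sketch: wrap one FPU-touching instruction in TS save/restore. */
static void issue_fpu_touching_insn(void)
{
	int ts_state;

	ts_state = irq_ts_save();	/* clears CR0.TS if it was set */
	/* ... e.g. VIA 'xstore', which would fault if CR0.TS were set ... */
	irq_ts_restore(ts_state);	/* sets CR0.TS back if we cleared it */
}
```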
4af08f2f | 183 | /* |
48c4717f | 184 | * Save the FPU state (mark it for reload if necessary): |
87cdb98a IM | 185 | * |
| 186 | * This only ever gets called for the current task. |
4af08f2f | 187 | */ |
0c070595 | 188 | void fpu__save(struct fpu *fpu) |
8546c008 | 189 | { |
0c070595 | 190 | WARN_ON(fpu != &current->thread.fpu); |
87cdb98a | 191 | |
8546c008 | 192 | preempt_disable(); |
d5cea9b0 | 193 | if (fpu->fpregs_active) { |
48c4717f | 194 | if (!copy_fpregs_to_fpstate(fpu)) |
66af8e27 | 195 | fpregs_deactivate(fpu); |
a9241ea5 | 196 | } |
8546c008 LT | 197 | preempt_enable(); |
| 198 | } |
4af08f2f | 199 | EXPORT_SYMBOL_GPL(fpu__save); |
8546c008 | 200 | |
0aba6978 IM | 201 | /* |
| 202 | * Legacy x87 fpstate state init: |
| 203 | */ |
c47ada30 | 204 | static inline void fpstate_init_fstate(struct fregs_state *fp) |
0aba6978 IM | 205 | { |
| 206 | fp->cwd = 0xffff037fu; |
| 207 | fp->swd = 0xffff0000u; |
| 208 | fp->twd = 0xffffffffu; |
| 209 | fp->fos = 0xffff0000u; |
| 210 | } |
| 211 | |
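For reference, these magic values appear to mirror an FNSAVE image taken right after FNINIT, with the upper 16 bits of each 32-bit legacy slot reading back as ones; decoded per my reading of the SDM bit layout:

```c
/*
 * Decoded legacy init values (assumed reading of the SDM layout):
 *
 *   cwd low word 0x037f: IM|DM|ZM|OM|UM|PM set (all exceptions masked),
 *                        PC = 11b (double extended precision),
 *                        RC = 00b (round to nearest)
 *   swd low word 0x0000: no pending exceptions, TOS = 0
 *   twd low word 0xffff: all eight tag fields = 11b (registers empty)
 */
```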
c47ada30 | 212 | void fpstate_init(union fpregs_state *state) |
1da177e4 | 213 | { |
60e019eb | 214 | if (!cpu_has_fpu) { |
bf935b0b | 215 | fpstate_init_soft(&state->soft); |
86603283 | 216 | return; |
e8a496ac | 217 | } |
e8a496ac | 218 | |
bf935b0b | 219 | memset(state, 0, xstate_size); |
1d23c451 | 220 | |
0aba6978 | 221 | if (cpu_has_fxsr) |
bf935b0b | 222 | fpstate_init_fxstate(&state->fxsave); |
0aba6978 | 223 | else |
bf935b0b | 224 | fpstate_init_fstate(&state->fsave); |
86603283 | 225 | } |
c0ee2cf6 | 226 | EXPORT_SYMBOL_GPL(fpstate_init); |
86603283 | 227 | |
bfd6fc05 IM | 228 | /* |
| 229 | * Copy the current task's FPU state to a new task's FPU context. |
| 230 | * |
aeb997b9 IM | 231 | * In both the 'eager' and the 'lazy' case we save hardware registers |
| 232 | * directly to the destination buffer. |
bfd6fc05 | 233 | */ |
f9bc977f | 234 | static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu) |
e102f30f | 235 | { |
f9bc977f | 236 | WARN_ON(src_fpu != &current->thread.fpu); |
bfd6fc05 | 237 | |
b1652900 IM | 238 | /* |
| 239 | * Don't let 'init optimized' areas of the XSAVE area |
| 240 | * leak into the child task: |
| 241 | */ |
| 242 | if (use_eager_fpu()) |
7366ed77 | 243 | memset(&dst_fpu->state.xsave, 0, xstate_size); |
b1652900 IM | 244 | |
| 245 | /* |
| 246 | * Save current FPU registers directly into the child |
| 247 | * FPU context, without any memory-to-memory copying. |
| 248 | * |
| 249 | * If the FPU context got destroyed in the process (FNSAVE |
| 250 | * done on old CPUs) then copy it back into the source |
| 251 | * context and mark the current task for lazy restore. |
| 252 | * |
| 253 | * We have to do all this with preemption disabled, |
| 254 | * mostly because of the FNSAVE case, because in that |
| 255 | * case we must not allow preemption in the window |
| 256 | * between the FNSAVE and us marking the context lazy. |
| 257 | * |
| 258 | * It shouldn't be an issue as even FNSAVE is plenty |
| 259 | * fast in terms of critical section length. |
| 260 | */ |
| 261 | preempt_disable(); |
| 262 | if (!copy_fpregs_to_fpstate(dst_fpu)) { |
| 263 | memcpy(&src_fpu->state, &dst_fpu->state, xstate_size); |
| 264 | fpregs_deactivate(src_fpu); |
e102f30f | 265 | } |
b1652900 | 266 | preempt_enable(); |
e102f30f IM | 267 | } |
| 268 | |
c69e098b | 269 | int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu) |
a752b53d | 270 | { |
c69e098b | 271 | dst_fpu->counter = 0; |
d5cea9b0 | 272 | dst_fpu->fpregs_active = 0; |
c69e098b | 273 | dst_fpu->last_cpu = -1; |
a752b53d | 274 | |
c4d6ee6e | 275 | if (src_fpu->fpstate_active) |
f9bc977f | 276 | fpu_copy(dst_fpu, src_fpu); |
c4d6ee6e | 277 | |
a752b53d IM | 278 | return 0; |
| 279 | } |
| 280 | |
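For context, fpu__copy() is reached from the fork path; a simplified sketch of the assumed call site in arch/x86/kernel/process.c:

```c
#include <linux/sched.h>
#include <asm/fpu/internal.h>	/* fpu__copy() */

/* Simplified sketch (assumed): the fork-time caller of fpu__copy(). */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	*dst = *src;

	return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
}
```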
97185c95 | 281 | /* |
c4d72e2d IM | 282 | * Activate the current task's in-memory FPU context, |
| 283 | * if it has not been used before: |
97185c95 | 284 | */ |
c4d72e2d | 285 | void fpu__activate_curr(struct fpu *fpu) |
97185c95 | 286 | { |
91d93d0e | 287 | WARN_ON_ONCE(fpu != &current->thread.fpu); |
97185c95 | 288 | |
c4d72e2d | 289 | if (!fpu->fpstate_active) { |
bf935b0b | 290 | fpstate_init(&fpu->state); |
97185c95 | 291 | |
c4d72e2d IM | 292 | /* Safe to do for the current task: */ |
| 293 | fpu->fpstate_active = 1; |
| 294 | } |
97185c95 | 295 | } |
c4d72e2d | 296 | EXPORT_SYMBOL_GPL(fpu__activate_curr); |
97185c95 | 297 | |
86603283 | 298 | /* |
67ee658e IM | 299 | * This function must be called before we modify a stopped child's |
| 300 | * fpstate. |
af7f8721 IM | 301 | * |
| 302 | * If the child has not used the FPU before then initialize its |
67ee658e | 303 | * fpstate. |
af7f8721 IM | 304 | * |
| 305 | * If the child has used the FPU before then unlazy it. |
| 306 | * |
67ee658e IM | 307 | * [ After this function call, after registers in the fpstate are |
| 308 | * modified and the child task has woken up, the child task will |
| 309 | * restore the modified FPU state from the modified context. If we |
af7f8721 | 310 | * didn't clear its lazy status here then the lazy in-registers |
67ee658e | 311 | * state pending on its former CPU could be restored, corrupting |
af7f8721 IM | 312 | * the modifications. ] |
| 313 | * |
| 314 | * This function is also called before we read a stopped child's |
67ee658e IM | 315 | * FPU state - to make sure it's initialized if the child has |
| 316 | * no active FPU state. |
af7f8721 IM | 317 | * |
| 318 | * TODO: A future optimization would be to skip the unlazying in |
| 319 | * the read-only case, it's not strictly necessary for |
| 320 | * read-only access to the context. |
86603283 | 321 | */ |
0c306bcf | 322 | void fpu__activate_stopped(struct fpu *child_fpu) |
86603283 | 323 | { |
2fb29fc7 | 324 | WARN_ON_ONCE(child_fpu == &current->thread.fpu); |
67e97fc2 | 325 | |
c5bedc68 | 326 | if (child_fpu->fpstate_active) { |
cc08d545 | 327 | child_fpu->last_cpu = -1; |
2fb29fc7 | 328 | } else { |
bf935b0b | 329 | fpstate_init(&child_fpu->state); |
071ae621 | 330 | |
2fb29fc7 IM | 331 | /* Safe to do for stopped child tasks: */ |
| 332 | child_fpu->fpstate_active = 1; |
| 333 | } |
1da177e4 LT | 334 | } |
| 335 | |
93b90712 | 336 | /* |
be7436d5 IM | 337 | * 'fpu__restore()' is called to copy FPU registers from |
| 338 | * the FPU fpstate to the live hw registers and to activate |
| 339 | * access to the hardware registers, so that FPU instructions |
| 340 | * can be used afterwards. |
93b90712 | 341 | * |
be7436d5 IM | 342 | * Must be called with kernel preemption disabled (for example |
| 343 | * with local interrupts disabled, as it is in the case of |
| 344 | * do_device_not_available()). |
93b90712 | 345 | */ |
e1884d69 | 346 | void fpu__restore(struct fpu *fpu) |
93b90712 | 347 | { |
c4d72e2d | 348 | fpu__activate_curr(fpu); |
93b90712 | 349 | |
232f62cd | 350 | /* Avoid __kernel_fpu_begin() right after fpregs_activate() */ |
93b90712 | 351 | kernel_fpu_disable(); |
232f62cd | 352 | fpregs_activate(fpu); |
0e75c54f | 353 | if (unlikely(copy_fpstate_to_fpregs(fpu))) { |
fbce7782 | 354 | fpu__clear(fpu); |
e1884d69 | 355 | force_sig_info(SIGSEGV, SEND_SIG_PRIV, current); |
93b90712 | 356 | } else { |
e1884d69 | 357 | fpu->counter++; |
93b90712 IM | 358 | } |
| 359 | kernel_fpu_enable(); |
| 360 | } |
3a0aee48 | 361 | EXPORT_SYMBOL_GPL(fpu__restore); |
93b90712 | 362 | |
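The primary caller is the device-not-available (#NM) trap handler; a simplified sketch of the assumed path in arch/x86/kernel/traps.c, with the math-emulation branch omitted:

```c
#include <asm/traps.h>
#include <asm/fpu/internal.h>	/* fpu__restore(), use_eager_fpu() */

/* Simplified sketch (assumed): the #NM handler invoking fpu__restore(). */
dotraplinkage void
do_device_not_available(struct pt_regs *regs, long error_code)
{
	BUG_ON(use_eager_fpu());	/* eager FPU must never take #NM */

	fpu__restore(&current->thread.fpu);	/* interrupts still off */
}
```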
6ffc152e IM | 363 | /* |
| 364 | * Drops current FPU state: deactivates the fpregs and |
| 365 | * the fpstate. NOTE: it still leaves previous contents |
| 366 | * in the fpregs in the eager-FPU case. |
| 367 | * |
| 368 | * This function can be used in cases where we know that |
| 369 | * a state-restore is coming: either an explicit one, |
| 370 | * or a reschedule. |
| 371 | */ |
| 372 | void fpu__drop(struct fpu *fpu) |
| 373 | { |
| 374 | preempt_disable(); |
| 375 | fpu->counter = 0; |
| 376 | |
| 377 | if (fpu->fpregs_active) { |
| 378 | /* Ignore delayed exceptions from user space */ |
| 379 | asm volatile("1: fwait\n" |
| 380 | "2:\n" |
| 381 | _ASM_EXTABLE(1b, 2b)); |
| 382 | fpregs_deactivate(fpu); |
| 383 | } |
| 384 | |
| 385 | fpu->fpstate_active = 0; |
| 386 | |
| 387 | preempt_enable(); |
| 388 | } |
| 389 | |
81541889 IM | 390 | /* |
| 391 | * Clear FPU registers by setting them up from |
| 392 | * the init fpstate: |
| 393 | */ |
| 394 | static inline void copy_init_fpstate_to_fpregs(void) |
| 395 | { |
| 396 | if (use_xsave()) |
c6813144 | 397 | copy_kernel_to_xregs(&init_fpstate.xsave, -1); |
81541889 | 398 | else |
c6813144 | 399 | copy_kernel_to_fxregs(&init_fpstate.fxsave); |
81541889 IM | 400 | } |
| 401 | |
6ffc152e | 402 | /* |
fbce7782 IM | 403 | * Clear the FPU state back to init state. |
| 404 | * |
| 405 | * Called by sys_execve(), by the signal handler code and by various |
| 406 | * error paths. |
2e85591a | 407 | */ |
04c8e01d | 408 | void fpu__clear(struct fpu *fpu) |
81683cc8 | 409 | { |
04c8e01d | 410 | WARN_ON_ONCE(fpu != &current->thread.fpu); /* Almost certainly an anomaly */ |
4c138410 | 411 | |
81683cc8 IM | 412 | if (!use_eager_fpu()) { |
| 413 | /* FPU state will be reallocated lazily at the first use. */ |
50338615 | 414 | fpu__drop(fpu); |
81683cc8 | 415 | } else { |
c5bedc68 | 416 | if (!fpu->fpstate_active) { |
c4d72e2d | 417 | fpu__activate_curr(fpu); |
81683cc8 IM | 418 | user_fpu_begin(); |
| 419 | } |
81541889 | 420 | copy_init_fpstate_to_fpregs(); |
81683cc8 IM | 421 | } |
| 422 | } |
| 423 | |
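The execve() path mentioned in the comment reaches fpu__clear() via flush_thread(); a simplified sketch of the assumed call site in arch/x86/kernel/process.c:

```c
#include <linux/sched.h>
#include <linux/hw_breakpoint.h>
#include <asm/fpu/internal.h>	/* fpu__clear() */

/* Simplified sketch (assumed): execve() clears FPU state via flush_thread(). */
void flush_thread(void)
{
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));

	fpu__clear(&tsk->thread.fpu);
}
```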
e1cebad4 IM | 424 | /* |
| 425 | * x87 math exception handling: |
| 426 | */ |
| 427 | |
| 428 | static inline unsigned short get_fpu_cwd(struct fpu *fpu) |
| 429 | { |
| 430 | if (cpu_has_fxsr) { |
| 431 | return fpu->state.fxsave.cwd; |
| 432 | } else { |
| 433 | return (unsigned short)fpu->state.fsave.cwd; |
| 434 | } |
| 435 | } |
| 436 | |
| 437 | static inline unsigned short get_fpu_swd(struct fpu *fpu) |
| 438 | { |
| 439 | if (cpu_has_fxsr) { |
| 440 | return fpu->state.fxsave.swd; |
| 441 | } else { |
| 442 | return (unsigned short)fpu->state.fsave.swd; |
| 443 | } |
| 444 | } |
| 445 | |
| 446 | static inline unsigned short get_fpu_mxcsr(struct fpu *fpu) |
| 447 | { |
| 448 | if (cpu_has_xmm) { |
| 449 | return fpu->state.fxsave.mxcsr; |
| 450 | } else { |
| 451 | return MXCSR_DEFAULT; |
| 452 | } |
| 453 | } |
| 454 | |
| 455 | int fpu__exception_code(struct fpu *fpu, int trap_nr) |
| 456 | { |
| 457 | int err; |
| 458 | |
| 459 | if (trap_nr == X86_TRAP_MF) { |
| 460 | unsigned short cwd, swd; |
| 461 | /* |
| 462 | * (~cwd & swd) will mask out exceptions that are not set to unmasked |
| 463 | * status. 0x3f is the exception bits in these regs, 0x200 is the |
| 464 | * C1 reg you need in case of a stack fault, 0x040 is the stack |
| 465 | * fault bit. We should only be taking one exception at a time, |
| 466 | * so if this combination doesn't produce any single exception, |
| 467 | * then we have a bad program that isn't synchronizing its FPU usage |
| 468 | * and it will suffer the consequences since we won't be able to |
| 469 | * fully reproduce the context of the exception |
| 470 | */ |
| 471 | cwd = get_fpu_cwd(fpu); |
| 472 | swd = get_fpu_swd(fpu); |
| 473 | |
| 474 | err = swd & ~cwd; |
| 475 | } else { |
| 476 | /* |
| 477 | * The SIMD FPU exceptions are handled a little differently, as there |
| 478 | * is only a single status/control register. Thus, to determine which |
| 479 | * unmasked exception was caught we must mask the exception mask bits |
| 480 | * at 0x1f80, and then use these to mask the exception bits at 0x3f. |
| 481 | */ |
| 482 | unsigned short mxcsr = get_fpu_mxcsr(fpu); |
| 483 | err = ~(mxcsr >> 7) & mxcsr; |
| 484 | } |
| 485 | |
| 486 | if (err & 0x001) { /* Invalid op */ |
| 487 | /* |
| 488 | * swd & 0x240 == 0x040: Stack Underflow |
| 489 | * swd & 0x240 == 0x240: Stack Overflow |
| 490 | * User must clear the SF bit (0x40) if set |
| 491 | */ |
| 492 | return FPE_FLTINV; |
| 493 | } else if (err & 0x004) { /* Divide by Zero */ |
| 494 | return FPE_FLTDIV; |
| 495 | } else if (err & 0x008) { /* Overflow */ |
| 496 | return FPE_FLTOVF; |
| 497 | } else if (err & 0x012) { /* Denormal, Underflow */ |
| 498 | return FPE_FLTUND; |
| 499 | } else if (err & 0x020) { /* Precision */ |
| 500 | return FPE_FLTRES; |
| 501 | } |
| 502 | |
| 503 | /* |
| 504 | * If we're using IRQ 13, or supposedly even some trap |
| 505 | * X86_TRAP_MF implementations, it's possible |
| 506 | * we get a spurious trap, which is not an error. |
| 507 | */ |
| 508 | return 0; |
| 509 | } |
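A worked example of the X86_TRAP_MF decoding above, assuming a divide-by-zero taken with the default control word except that ZM (bit 2) has been unmasked:

```c
/*
 * cwd = 0x037b   (FNINIT default 0x037f with ZM, bit 2, cleared)
 * swd = 0x0084   (ZE set after the fault, plus the ES summary bit;
 *                 exact status bits are an assumption for this example)
 *
 * err = swd & ~cwd
 *     = 0x0084 & 0xfc84
 *     = 0x0084
 *
 * err & 0x001 == 0 and err & 0x004 != 0, so fpu__exception_code()
 * returns FPE_FLTDIV, which the trap code uses as the SIGFPE si_code.
 */
```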