/*
 *  Copyright (C) 1994 Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  General FPU state handling cleanups
 *  Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <asm/fpu/internal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/signal.h>
#include <asm/traps.h>

#include <linux/hardirq.h>

/*
 * Represents the initial FPU state. It's mostly (but not completely) zeroes,
 * depending on the FPU hardware format:
 */
union fpregs_state init_fpstate __read_mostly;

/*
 * Track whether the kernel is using the FPU state
 * currently.
 *
 * This flag is used:
 *
 *   - by IRQ context code to potentially use the FPU
 *     if it's unused.
 *
 *   - to debug kernel_fpu_begin()/end() correctness
 */
static DEFINE_PER_CPU(bool, in_kernel_fpu);

/*
 * Track which context is using the FPU on the CPU:
 */
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

static void kernel_fpu_disable(void)
{
        WARN_ON_FPU(this_cpu_read(in_kernel_fpu));
        this_cpu_write(in_kernel_fpu, true);
}

static void kernel_fpu_enable(void)
{
        WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
        this_cpu_write(in_kernel_fpu, false);
}

static bool kernel_fpu_disabled(void)
{
        return this_cpu_read(in_kernel_fpu);
}

/*
 * Were we in an interrupt that interrupted kernel mode?
 *
 * With lazy FPU switching, we can do a kernel_fpu_begin/end() pair
 * *ONLY* if that pair does nothing at all: the thread must not have
 * an active FPU context (so that we don't try to save the FPU state),
 * and TS must be set (so that the clts/stts pair does nothing that is
 * visible in the interrupted kernel thread).
 *
 * Except for the eagerfpu case when we return true; in the likely case
 * the thread has an FPU context but we are not going to set/clear TS.
 */
static bool interrupted_kernel_fpu_idle(void)
{
        if (kernel_fpu_disabled())
                return false;

        if (use_eager_fpu())
                return true;

        return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS);
}

/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static bool interrupted_user_mode(void)
{
        struct pt_regs *regs = get_irq_regs();
        return regs && user_mode(regs);
}

/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
        return !in_interrupt() ||
                interrupted_user_mode() ||
                interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);

void __kernel_fpu_begin(void)
{
        struct fpu *fpu = &current->thread.fpu;

        WARN_ON_FPU(!irq_fpu_usable());

        kernel_fpu_disable();

        if (fpu->fpregs_active) {
                /*
                 * Ignore return value -- we don't care if reg state
                 * is clobbered.
                 */
                copy_fpregs_to_fpstate(fpu);
        } else {
                this_cpu_write(fpu_fpregs_owner_ctx, NULL);
                __fpregs_activate_hw();
        }
}
EXPORT_SYMBOL(__kernel_fpu_begin);

void __kernel_fpu_end(void)
{
        struct fpu *fpu = &current->thread.fpu;

        if (fpu->fpregs_active)
                copy_kernel_to_fpregs(&fpu->state);
        else
                __fpregs_deactivate_hw();

        kernel_fpu_enable();
}
EXPORT_SYMBOL(__kernel_fpu_end);

void kernel_fpu_begin(void)
{
        preempt_disable();
        __kernel_fpu_begin();
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin);

void kernel_fpu_end(void)
{
        __kernel_fpu_end();
        preempt_enable();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);

/*
 * CR0::TS save/restore functions:
 */
int irq_ts_save(void)
{
        /*
         * If in process context and not atomic, we can simply take a
         * spurious DNA (device-not-available) fault, so there is nothing
         * to save. Otherwise, doing clts() in process context would
         * require disabling preemption or some heavy lifting like
         * kernel_fpu_begin().
         */
        if (!in_atomic())
                return 0;

        if (read_cr0() & X86_CR0_TS) {
                clts();
                return 1;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(irq_ts_save);

void irq_ts_restore(int TS_state)
{
        if (TS_state)
                stts();
}
EXPORT_SYMBOL_GPL(irq_ts_restore);

/*
 * Save the FPU state (mark it for reload if necessary):
 *
 * This only ever gets called for the current task.
 */
void fpu__save(struct fpu *fpu)
{
        WARN_ON_FPU(fpu != &current->thread.fpu);

        preempt_disable();
        if (fpu->fpregs_active) {
                if (!copy_fpregs_to_fpstate(fpu)) {
                        if (use_eager_fpu())
                                copy_kernel_to_fpregs(&fpu->state);
                        else
                                fpregs_deactivate(fpu);
                }
        }
        preempt_enable();
}
EXPORT_SYMBOL_GPL(fpu__save);

/*
 * Legacy x87 fpstate init:
 */
static inline void fpstate_init_fstate(struct fregs_state *fp)
{
        fp->cwd = 0xffff037fu;  /* default control word: all exceptions masked */
        fp->swd = 0xffff0000u;  /* status word: clear */
        fp->twd = 0xffffffffu;  /* tag word: all registers empty */
        fp->fos = 0xffff0000u;
}

void fpstate_init(union fpregs_state *state)
{
        if (!static_cpu_has(X86_FEATURE_FPU)) {
                fpstate_init_soft(&state->soft);
                return;
        }

        memset(state, 0, xstate_size);

        if (cpu_has_fxsr)
                fpstate_init_fxstate(&state->fxsave);
        else
                fpstate_init_fstate(&state->fsave);
}
EXPORT_SYMBOL_GPL(fpstate_init);

int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
        dst_fpu->counter = 0;
        dst_fpu->fpregs_active = 0;
        dst_fpu->last_cpu = -1;

        if (!src_fpu->fpstate_active || !static_cpu_has(X86_FEATURE_FPU))
                return 0;

        WARN_ON_FPU(src_fpu != &current->thread.fpu);

        /*
         * Don't let 'init optimized' areas of the XSAVE area
         * leak into the child task:
         */
        if (use_eager_fpu())
                memset(&dst_fpu->state.xsave, 0, xstate_size);

        /*
         * Save current FPU registers directly into the child
         * FPU context, without any memory-to-memory copying.
         * In lazy mode, if the FPU context isn't loaded into
         * fpregs, CR0.TS will be set and do_device_not_available
         * will load the FPU context.
         *
         * We have to do all this with preemption disabled,
         * mostly because of the FNSAVE case, because in that
         * case we must not allow preemption in the window
         * between the FNSAVE and us marking the context lazy.
         *
         * It shouldn't be an issue as even FNSAVE is plenty
         * fast in terms of critical section length.
         */
        preempt_disable();
        if (!copy_fpregs_to_fpstate(dst_fpu)) {
                memcpy(&src_fpu->state, &dst_fpu->state, xstate_size);

                if (use_eager_fpu())
                        copy_kernel_to_fpregs(&src_fpu->state);
                else
                        fpregs_deactivate(src_fpu);
        }
        preempt_enable();

        return 0;
}

/*
 * Activate the current task's in-memory FPU context,
 * if it has not been used before:
 */
void fpu__activate_curr(struct fpu *fpu)
{
        WARN_ON_FPU(fpu != &current->thread.fpu);

        if (!fpu->fpstate_active) {
                fpstate_init(&fpu->state);

                /* Safe to do for the current task: */
                fpu->fpstate_active = 1;
        }
}
EXPORT_SYMBOL_GPL(fpu__activate_curr);

/*
 * This function must be called before we read a task's fpstate.
 *
 * If the task has not used the FPU before then initialize its
 * fpstate.
 *
 * If the task has used the FPU before then save it.
 */
void fpu__activate_fpstate_read(struct fpu *fpu)
{
        /*
         * If fpregs are active (in the current CPU), then
         * copy them to the fpstate:
         */
        if (fpu->fpregs_active) {
                fpu__save(fpu);
        } else {
                if (!fpu->fpstate_active) {
                        fpstate_init(&fpu->state);

                        /* Safe to do for current and for stopped child tasks: */
                        fpu->fpstate_active = 1;
                }
        }
}

/*
 * This function must be called before we write a task's fpstate.
 *
 * If the task has used the FPU before then unlazy it.
 * If the task has not used the FPU before then initialize its fpstate.
 *
 * After this function call, after registers in the fpstate are
 * modified and the child task has woken up, the child task will
 * restore the modified FPU state from the modified context. If we
 * didn't clear its lazy status here then the lazy in-registers
 * state pending on its former CPU could be restored, corrupting
 * the modifications.
 */
void fpu__activate_fpstate_write(struct fpu *fpu)
{
        /*
         * Only stopped child tasks can be used to modify the FPU
         * state in the fpstate buffer:
         */
        WARN_ON_FPU(fpu == &current->thread.fpu);

        if (fpu->fpstate_active) {
                /* Invalidate any lazy state: */
                fpu->last_cpu = -1;
        } else {
                fpstate_init(&fpu->state);

                /* Safe to do for stopped child tasks: */
                fpu->fpstate_active = 1;
        }
}

/*
 * This function must be called before we write the current
 * task's fpstate.
 *
 * This call gets the current FPU register state and moves
 * it in to the 'fpstate'. Preemption is disabled so that
 * no writes to the 'fpstate' can occur from context
 * switches.
 *
 * Must be followed by a fpu__current_fpstate_write_end().
 */
void fpu__current_fpstate_write_begin(void)
{
        struct fpu *fpu = &current->thread.fpu;

        /*
         * Ensure that the context-switching code does not write
         * over the fpstate while we are doing our update.
         */
        preempt_disable();

        /*
         * Move the fpregs in to the fpu's 'fpstate'.
         */
        fpu__activate_fpstate_read(fpu);

        /*
         * The caller is about to write to 'fpu'. Ensure that no
         * CPU thinks that its fpregs match the fpstate. This
         * ensures we will not be lazy and skip a XRSTOR in the
         * future.
         */
        fpu->last_cpu = -1;
}

/*
 * This function must be paired with fpu__current_fpstate_write_begin().
 *
 * This will ensure that the modified fpstate gets placed back in
 * the fpregs if necessary.
 *
 * Note: This function may be called whether or not an _actual_
 * write to the fpstate occurred.
 */
void fpu__current_fpstate_write_end(void)
{
        struct fpu *fpu = &current->thread.fpu;

        /*
         * 'fpu' now has an updated copy of the state, but the
         * registers may still be out of date. Update them with
         * an XRSTOR if they are active.
         */
        if (fpregs_active())
                copy_kernel_to_fpregs(&fpu->state);

        /*
         * Our update is done and the fpregs/fpstate are in sync
         * if necessary. Context switches can happen again.
         */
        preempt_enable();
}

/*
 * 'fpu__restore()' is called to copy FPU registers from
 * the FPU fpstate to the live hw registers and to activate
 * access to the hardware registers, so that FPU instructions
 * can be used afterwards.
 *
 * Must be called with kernel preemption disabled (for example
 * with local interrupts disabled, as it is in the case of
 * do_device_not_available()).
 */
void fpu__restore(struct fpu *fpu)
{
        fpu__activate_curr(fpu);

        /* Avoid __kernel_fpu_begin() right after fpregs_activate() */
        kernel_fpu_disable();
        fpregs_activate(fpu);
        copy_kernel_to_fpregs(&fpu->state);
        fpu->counter++;
        kernel_fpu_enable();
}
EXPORT_SYMBOL_GPL(fpu__restore);

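/*
 * For reference, the main caller is the device-not-available trap
 * handler in arch/x86/kernel/traps.c, roughly (details vary by kernel
 * version):
 *
 *	dotraplinkage void
 *	do_device_not_available(struct pt_regs *regs, long error_code)
 *	{
 *		...
 *		fpu__restore(&current->thread.fpu);	// interrupts still off
 *		...
 *	}
 */
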
/*
 * Drops current FPU state: deactivates the fpregs and
 * the fpstate. NOTE: it still leaves previous contents
 * in the fpregs in the eager-FPU case.
 *
 * This function can be used in cases where we know that
 * a state-restore is coming: either an explicit one,
 * or a reschedule.
 */
void fpu__drop(struct fpu *fpu)
{
        preempt_disable();
        fpu->counter = 0;

        if (fpu->fpregs_active) {
                /* Ignore delayed exceptions from user space */
                asm volatile("1: fwait\n"
                             "2:\n"
                             _ASM_EXTABLE(1b, 2b));
                fpregs_deactivate(fpu);
        }

        fpu->fpstate_active = 0;

        preempt_enable();
}

/*
 * Clear FPU registers by setting them up from
 * the init fpstate:
 */
static inline void copy_init_fpstate_to_fpregs(void)
{
        if (use_xsave())
                copy_kernel_to_xregs(&init_fpstate.xsave, -1);
        else if (static_cpu_has(X86_FEATURE_FXSR))
                copy_kernel_to_fxregs(&init_fpstate.fxsave);
        else
                copy_kernel_to_fregs(&init_fpstate.fsave);
}

/*
 * Clear the FPU state back to init state.
 *
 * Called by sys_execve(), by the signal handler code and by various
 * error paths.
 */
void fpu__clear(struct fpu *fpu)
{
        WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */

        if (!use_eager_fpu() || !static_cpu_has(X86_FEATURE_FPU)) {
                /* FPU state will be reallocated lazily at the first use. */
                fpu__drop(fpu);
        } else {
                if (!fpu->fpstate_active) {
                        fpu__activate_curr(fpu);
                        user_fpu_begin();
                }
                copy_init_fpstate_to_fpregs();
        }
}

/*
 * x87 math exception handling:
 */

static inline unsigned short get_fpu_cwd(struct fpu *fpu)
{
        if (cpu_has_fxsr) {
                return fpu->state.fxsave.cwd;
        } else {
                return (unsigned short)fpu->state.fsave.cwd;
        }
}

static inline unsigned short get_fpu_swd(struct fpu *fpu)
{
        if (cpu_has_fxsr) {
                return fpu->state.fxsave.swd;
        } else {
                return (unsigned short)fpu->state.fsave.swd;
        }
}

static inline unsigned short get_fpu_mxcsr(struct fpu *fpu)
{
        if (boot_cpu_has(X86_FEATURE_XMM)) {
                return fpu->state.fxsave.mxcsr;
        } else {
                return MXCSR_DEFAULT;
        }
}

int fpu__exception_code(struct fpu *fpu, int trap_nr)
{
        int err;

        if (trap_nr == X86_TRAP_MF) {
                unsigned short cwd, swd;
                /*
                 * (~cwd & swd) will mask out exceptions that are not set to unmasked
                 * status. 0x3f is the exception bits in these regs, 0x200 is the
                 * C1 reg you need in case of a stack fault, 0x040 is the stack
                 * fault bit. We should only be taking one exception at a time,
                 * so if this combination doesn't produce any single exception,
                 * then we have a bad program that isn't synchronizing its FPU usage
                 * and it will suffer the consequences since we won't be able to
                 * fully reproduce the context of the exception.
                 */
                cwd = get_fpu_cwd(fpu);
                swd = get_fpu_swd(fpu);

                err = swd & ~cwd;
        } else {
                /*
                 * The SIMD FPU exceptions are handled a little differently, as there
                 * is only a single status/control register. Thus, to determine which
                 * unmasked exception was caught we must mask the exception mask bits
                 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
                 */
                unsigned short mxcsr = get_fpu_mxcsr(fpu);

                err = ~(mxcsr >> 7) & mxcsr;
        }

        if (err & 0x001) {      /* Invalid op */
                /*
                 * swd & 0x240 == 0x040: Stack Underflow
                 * swd & 0x240 == 0x240: Stack Overflow
                 * User must clear the SF bit (0x40) if set
                 */
                return FPE_FLTINV;
        } else if (err & 0x004) { /* Divide by Zero */
                return FPE_FLTDIV;
        } else if (err & 0x008) { /* Overflow */
                return FPE_FLTOVF;
        } else if (err & 0x012) { /* Denormal, Underflow */
                return FPE_FLTUND;
        } else if (err & 0x020) { /* Precision */
                return FPE_FLTRES;
        }

        /*
         * If we're using IRQ 13, or supposedly even some X86_TRAP_MF
         * implementations, it's possible we get a spurious trap, which
         * is not an error.
         */
        return 0;
}
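
/*
 * Worked example (illustrative): if a program unmasks the divide-by-zero
 * exception (clears the ZM bit, 0x004, in the x87 control word) and then
 * divides by zero, the ZE bit (0x004) gets set in the status word, so
 * err = swd & ~cwd has bit 0x004 set and we return FPE_FLTDIV.
 */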