/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _ASM_X86_I387_H
#define _ASM_X86_I387_H

#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/hardirq.h>

struct pt_regs;
struct user_i387_struct;

/* Initialize @child's FPU state, allocating it if necessary */
extern int init_fpu(struct task_struct *child);
/* Set @fpu to the default (clean) FPU state */
extern void fpu_finit(struct fpu *fpu);
/* Dump the FPU state in user_i387_struct format, e.g. for core dumps */
extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
/* Restore the current task's FPU state, e.g. from the #NM fault handler */
extern void math_state_restore(void);

/* True if the FPU can safely be used in the current (interrupt) context */
extern bool irq_fpu_usable(void);

/*
 * Careful: __kernel_fpu_begin/end() must be called with preemption
 * disabled, and they do not touch the preempt state on their own.
 * If you enable preemption after __kernel_fpu_begin(), the preempt
 * notifier should call __kernel_fpu_end() to keep the kernel/user
 * FPU state from getting corrupted. KVM, for example, uses this model.
 *
 * All other cases should use kernel_fpu_begin/end(), which disable
 * preemption for the duration of the kernel FPU usage.
 */
extern void __kernel_fpu_begin(void);
extern void __kernel_fpu_end(void);

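/*
 * Sketch (not part of the original header): the raw pairing the comment
 * above describes. raw_fpu_section_example() is hypothetical; the caller
 * owns preemption explicitly, which is what lets users like KVM re-enable
 * preemption in between and rely on a preempt notifier to call
 * __kernel_fpu_end().
 */
#if 0	/* example only */
static void raw_fpu_section_example(void)
{
	preempt_disable();		/* caller manages preemption itself */
	__kernel_fpu_begin();
	/* ... FPU/SIMD work ... */
	__kernel_fpu_end();
	preempt_enable();
}
#endif
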
static inline void kernel_fpu_begin(void)
{
	preempt_disable();
	WARN_ON_ONCE(!irq_fpu_usable());
	__kernel_fpu_begin();
}

static inline void kernel_fpu_end(void)
{
	__kernel_fpu_end();
	preempt_enable();
}

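/*
 * Sketch (not part of the original header): how a caller is expected to
 * pair kernel_fpu_begin()/kernel_fpu_end(). xor_blocks_example() and its
 * scalar loop are hypothetical placeholders for real SIMD work; note that
 * the code between begin/end must not sleep, since preemption is disabled
 * for the whole region.
 */
#if 0	/* example only */
static void xor_blocks_example(unsigned long *dst, const unsigned long *src,
			       unsigned int words)
{
	unsigned int i;

	kernel_fpu_begin();		/* disable preemption, claim the FPU */
	for (i = 0; i < words; i++)	/* stand-in for real SIMD work */
		dst[i] ^= src[i];
	kernel_fpu_end();		/* give up the FPU, re-enable preemption */
}
#endif
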
/* Must be called with preempt disabled */
extern void kernel_fpu_disable(void);
extern void kernel_fpu_enable(void);

/*
 * Some instructions, like VIA's padlock instructions, generate a spurious
 * DNA (device-not-available, #NM) fault but don't modify the SSE registers,
 * and they may be used from interrupt context as well. To keep such kernel
 * instructions in interrupt context from interacting badly with other
 * user/kernel FPU usage, use them only bracketed by irq_ts_save() and
 * irq_ts_restore().
 */
static inline int irq_ts_save(void)
{
	/*
	 * In process context (not atomic), taking a spurious DNA fault
	 * is harmless, so leave CR0.TS alone: clearing it with clts()
	 * here would require disabling preemption or heavier machinery
	 * like kernel_fpu_begin().
	 */
	if (!in_atomic())
		return 0;

	if (read_cr0() & X86_CR0_TS) {
		clts();
		return 1;
	}

	return 0;
}

static inline void irq_ts_restore(int TS_state)
{
	if (TS_state)
		stts();
}

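/*
 * Sketch (not part of the original header): the save/restore pattern
 * around a padlock-style instruction. padlock_op_example() and the body
 * of the bracketed region are hypothetical; the point is only that the
 * TS state returned by irq_ts_save() is handed back to irq_ts_restore().
 */
#if 0	/* example only */
static void padlock_op_example(void)
{
	int ts_state;

	ts_state = irq_ts_save();	/* clears CR0.TS if set, in atomic context */
	/* ... issue the padlock instruction here ... */
	irq_ts_restore(ts_state);	/* sets CR0.TS again if we cleared it */
}
#endif
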
/*
 * The question "does this thread have fpu access?"
 * is slightly racy, since preemption could come in
 * and revoke it immediately after the test.
 *
 * However, even in that very unlikely scenario,
 * we can just assume we have FPU access - typically
 * to save the FP state - we'll just take a #NM
 * fault and get the FPU access back.
 */
static inline int user_has_fpu(void)
{
	return current->thread.fpu.has_fpu;
}

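/*
 * Sketch (not part of the original header): the benign race the comment
 * above describes. save_fpu_example() is hypothetical; even if preemption
 * revokes FPU ownership right after the user_has_fpu() test, touching the
 * FPU just raises a #NM fault and math_state_restore() hands the FPU back,
 * so no locking is needed around the check.
 */
#if 0	/* example only */
static void save_fpu_example(void)
{
	if (user_has_fpu())
		unlazy_fpu(current);	/* save FP state, give up the FPU */
}
#endif
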
/* Save @tsk's FPU state if it owns the FPU, then give the FPU up */
extern void unlazy_fpu(struct task_struct *tsk);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_I387_H */