// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <asm/fpu/api.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/sched.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/types.h>
#include <asm/msr.h>
#include <asm/traps.h>
#include <asm/irq_regs.h>

#include <uapi/asm/kvm.h>

#include <linux/hardirq.h>
#include <linux/pkeys.h>
#include <linux/vmalloc.h>

#include "context.h"
#include "internal.h"
#include "legacy.h"
#include "xstate.h"

#define CREATE_TRACE_POINTS
#include <asm/trace/fpu.h>

#ifdef CONFIG_X86_64
DEFINE_STATIC_KEY_FALSE(__fpu_state_size_dynamic);
DEFINE_PER_CPU(u64, xfd_state);
#endif

/* The FPU state configuration data for kernel and user space */
struct fpu_state_config fpu_kernel_cfg __ro_after_init;
struct fpu_state_config fpu_user_cfg __ro_after_init;

/*
 * Represents the initial FPU state. It's mostly (but not completely) zeroes,
 * depending on the FPU hardware format:
 */
struct fpstate init_fpstate __ro_after_init;

/*
 * Track FPU initialization and kernel-mode usage. 'true' means the FPU is
 * initialized and is not currently being used by the kernel:
 */
DEFINE_PER_CPU(bool, kernel_fpu_allowed);

/*
 * Track which context is using the FPU on the CPU:
 */
DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);

#ifdef CONFIG_X86_DEBUG_FPU
struct fpu *x86_task_fpu(struct task_struct *task)
{
	if (WARN_ON_ONCE(task->flags & PF_KTHREAD))
		return NULL;

	return (void *)task + sizeof(*task);
}
#endif

/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 */
bool irq_fpu_usable(void)
{
	if (WARN_ON_ONCE(in_nmi()))
		return false;

	/*
	 * Return false in the following cases:
	 *
	 * - FPU is not yet initialized. This can happen only when the call is
	 *   coming from CPU onlining, for example for microcode checksumming.
	 * - The kernel is already using the FPU, either because of explicit
	 *   nesting (which should never be done), or because of implicit
	 *   nesting when a hardirq interrupted a kernel-mode FPU section.
	 *
	 * The single boolean check below handles both cases:
	 */
	if (!this_cpu_read(kernel_fpu_allowed))
		return false;

	/*
	 * When not in NMI or hard interrupt context, the FPU can be used in:
	 *
	 * - Task context, except from within fpregs_lock()'ed critical
	 *   regions.
	 *
	 * - Soft interrupt processing context, which cannot happen
	 *   while in an fpregs_lock()'ed critical region.
	 */
	if (!in_hardirq())
		return true;

	/*
	 * In hard interrupt context it's safe when soft interrupts
	 * are enabled, which means the interrupt did not hit in
	 * an fpregs_lock()'ed critical region.
	 */
	return !softirq_count();
}
EXPORT_SYMBOL(irq_fpu_usable);
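
/*
 * Illustrative sketch (not part of this file): the canonical caller
 * pattern for the checks above. A driver that wants to use SIMD in
 * kernel mode first asks irq_fpu_usable() and falls back to a scalar
 * path when the FPU is unavailable. The helper names in this example
 * (crc32_simd(), crc32_scalar()) are hypothetical.
 */
#if 0
static u32 crc32_update(u32 crc, const u8 *data, size_t len)
{
	if (!irq_fpu_usable())
		return crc32_scalar(crc, data, len);

	kernel_fpu_begin();
	crc = crc32_simd(crc, data, len);	/* may clobber XMM/YMM regs */
	kernel_fpu_end();

	return crc;
}
#endif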

/*
 * Track AVX512 state use because it is known to slow the max clock
 * speed of the core.
 */
static void update_avx_timestamp(struct fpu *fpu)
{

#define AVX512_TRACKING_MASK	(XFEATURE_MASK_ZMM_Hi256 | XFEATURE_MASK_Hi16_ZMM)

	if (fpu->fpstate->regs.xsave.header.xfeatures & AVX512_TRACKING_MASK)
		fpu->avx512_timestamp = jiffies;
}
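
/*
 * Illustrative sketch (not part of this file): the timestamp recorded by
 * update_avx_timestamp() can be converted into a "time since last AVX512
 * use" figure; this is (roughly) what the AVX512_elapsed_ms field in
 * /proc/<pid>/arch_status is derived from. The helper name below is
 * hypothetical and assumes a valid user-space @task.
 */
#if 0
static unsigned long avx512_elapsed_ms(struct task_struct *task)
{
	unsigned long timestamp = READ_ONCE(x86_task_fpu(task)->avx512_timestamp);

	/* Never used AVX512: report 0 here (the real interface reports -1). */
	return timestamp ? jiffies_to_msecs(jiffies - timestamp) : 0;
}
#endif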

/*
 * Save the FPU register state in fpu->fpstate->regs. The register state is
 * preserved.
 *
 * Must be called with fpregs_lock() held.
 *
 * The legacy FNSAVE instruction clears all FPU state unconditionally, so
 * register state has to be reloaded. That might be a pointless exercise
 * when the FPU is going to be used by another task right after that. But
 * this only affects 20+ year old 32-bit systems and avoids conditionals all
 * over the place.
 *
 * FXSAVE and all XSAVE variants preserve the FPU register state.
 */
void save_fpregs_to_fpstate(struct fpu *fpu)
{
	if (likely(use_xsave())) {
		os_xsave(fpu->fpstate);
		update_avx_timestamp(fpu);
		return;
	}

	if (likely(use_fxsr())) {
		fxsave(&fpu->fpstate->regs.fxsave);
		return;
	}

	/*
	 * Legacy FPU register saving, FNSAVE always clears FPU registers,
	 * so we have to reload them from the memory state.
	 */
	asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->fpstate->regs.fsave));
	frstor(&fpu->fpstate->regs.fsave);
}

void restore_fpregs_from_fpstate(struct fpstate *fpstate, u64 mask)
{
	/*
	 * AMD K7/K8 and later CPUs up to Zen don't save/restore
	 * FDP/FIP/FOP unless an exception is pending. Clear the x87 state
	 * here by setting it to fixed values. "m" is a random variable
	 * that should be in L1.
	 */
	if (unlikely(static_cpu_has_bug(X86_BUG_FXSAVE_LEAK))) {
		asm volatile(
			"fnclex\n\t"
			"emms\n\t"
			"fildl %[addr]"	/* set F?P to defined value */
			: : [addr] "m" (*fpstate));
	}

	if (use_xsave()) {
		/*
		 * Dynamically enabled features are enabled in XCR0, but
		 * usage also requires that the corresponding bits in XFD
		 * are cleared. If the bits are set then using a related
		 * instruction will raise #NM. This allows the larger FPU
		 * buffer to be allocated lazily from the #NM handler, or
		 * the task to be killed via #UD if it has no permission,
		 * because the feature is then disabled in XCR0.
		 *
		 * XFD state follows the same lifetime rules as XSTATE, and
		 * to restore state correctly XFD has to be updated before
		 * XRSTORS. Otherwise the component would stay in or go into
		 * its init state even if the bits are set in
		 * fpstate::regs::xsave::xfeatures.
		 */
		xfd_update_state(fpstate);

		/*
		 * Restoring state always needs to modify all features
		 * which are in @mask even if the current task cannot use
		 * extended features.
		 *
		 * So fpstate->xfeatures cannot be used here, because then
		 * a feature for which the task has no permission but was
		 * used by the previous task would not go into init state.
		 */
		mask = fpu_kernel_cfg.max_features & mask;

		os_xrstor(fpstate, mask);
	} else {
		if (use_fxsr())
			fxrstor(&fpstate->regs.fxsave);
		else
			frstor(&fpstate->regs.fsave);
	}
}

void fpu_reset_from_exception_fixup(void)
{
	restore_fpregs_from_fpstate(&init_fpstate, XFEATURE_MASK_FPSTATE);
}

#if IS_ENABLED(CONFIG_KVM)
static void __fpstate_reset(struct fpstate *fpstate, u64 xfd);

static void fpu_lock_guest_permissions(void)
{
	struct fpu_state_perm *fpuperm;
	u64 perm;

	if (!IS_ENABLED(CONFIG_X86_64))
		return;

	spin_lock_irq(&current->sighand->siglock);
	fpuperm = &x86_task_fpu(current->group_leader)->guest_perm;
	perm = fpuperm->__state_perm;

	/* First fpstate allocation locks down permissions. */
	WRITE_ONCE(fpuperm->__state_perm, perm | FPU_GUEST_PERM_LOCKED);

	spin_unlock_irq(&current->sighand->siglock);
}

bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu)
{
	struct fpstate *fpstate;
	unsigned int size;

	size = fpu_kernel_cfg.default_size + ALIGN(offsetof(struct fpstate, regs), 64);
	fpstate = vzalloc(size);
	if (!fpstate)
		return false;

	/* Leave xfd at 0 (the reset value defined by the spec) */
	__fpstate_reset(fpstate, 0);
	fpstate_init_user(fpstate);
	fpstate->is_valloc = true;
	fpstate->is_guest = true;

	gfpu->fpstate = fpstate;
	gfpu->xfeatures = fpu_kernel_cfg.default_features;

	/*
	 * KVM sets the FP+SSE bits in the XSAVE header when copying FPU state
	 * to userspace, even when XSAVE is unsupported, so that restoring FPU
	 * state on a different CPU that does support XSAVE can cleanly load
	 * the incoming state using its natural XSAVE. In other words, KVM's
	 * uABI size may be larger than this host's default size. Conversely,
	 * the default size should never be larger than KVM's base uABI size;
	 * all features that can expand the uABI size must be opt-in.
	 */
	gfpu->uabi_size = sizeof(struct kvm_xsave);
	if (WARN_ON_ONCE(fpu_user_cfg.default_size > gfpu->uabi_size))
		gfpu->uabi_size = fpu_user_cfg.default_size;

	fpu_lock_guest_permissions();

	return true;
}
EXPORT_SYMBOL_GPL(fpu_alloc_guest_fpstate);

void fpu_free_guest_fpstate(struct fpu_guest *gfpu)
{
	struct fpstate *fpstate = gfpu->fpstate;

	if (!fpstate)
		return;

	if (WARN_ON_ONCE(!fpstate->is_valloc || !fpstate->is_guest || fpstate->in_use))
		return;

	gfpu->fpstate = NULL;
	vfree(fpstate);
}
EXPORT_SYMBOL_GPL(fpu_free_guest_fpstate);
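
/*
 * Illustrative sketch (not part of this file): the expected pairing of
 * the two helpers above in a hypervisor's vCPU lifecycle. The struct and
 * function names (struct my_vcpu, my_vcpu_create/destroy) are
 * hypothetical.
 */
#if 0
struct my_vcpu {
	struct fpu_guest guest_fpu;
};

static int my_vcpu_create(struct my_vcpu *vcpu)
{
	if (!fpu_alloc_guest_fpstate(&vcpu->guest_fpu))
		return -ENOMEM;
	return 0;
}

static void my_vcpu_destroy(struct my_vcpu *vcpu)
{
	/* Must not be called while the guest fpstate is in use. */
	fpu_free_guest_fpstate(&vcpu->guest_fpu);
}
#endif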

/*
 * fpu_enable_guest_xfd_features - Check xfeatures against guest perm and enable
 * @guest_fpu: Pointer to the guest FPU container
 * @xfeatures: Features requested by guest CPUID
 *
 * Enable all dynamic xfeatures according to guest perm and requested CPUID.
 *
 * Return: 0 on success, error code otherwise
 */
int fpu_enable_guest_xfd_features(struct fpu_guest *guest_fpu, u64 xfeatures)
{
	lockdep_assert_preemption_enabled();

	/* Nothing to do if all requested features are already enabled. */
	xfeatures &= ~guest_fpu->xfeatures;
	if (!xfeatures)
		return 0;

	return __xfd_enable_feature(xfeatures, guest_fpu);
}
EXPORT_SYMBOL_GPL(fpu_enable_guest_xfd_features);

#ifdef CONFIG_X86_64
void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd)
{
	fpregs_lock();
	guest_fpu->fpstate->xfd = xfd;
	if (guest_fpu->fpstate->in_use)
		xfd_update_state(guest_fpu->fpstate);
	fpregs_unlock();
}
EXPORT_SYMBOL_GPL(fpu_update_guest_xfd);

/**
 * fpu_sync_guest_vmexit_xfd_state - Synchronize XFD MSR and software state
 *
 * Must be invoked from KVM after a VMEXIT before enabling interrupts when
 * XFD write emulation is disabled. This is required because the guest can
 * freely modify XFD and the state at VMEXIT is not guaranteed to be the
 * same as the state on VMENTER. So software state has to be updated before
 * any operation which depends on it can take place.
 *
 * Note: It can be invoked unconditionally even when write emulation is
 * enabled, at the price of a then-pointless MSR read.
 */
void fpu_sync_guest_vmexit_xfd_state(void)
{
	struct fpstate *fpstate = x86_task_fpu(current)->fpstate;

	lockdep_assert_irqs_disabled();
	if (fpu_state_size_dynamic()) {
		rdmsrq(MSR_IA32_XFD, fpstate->xfd);
		__this_cpu_write(xfd_state, fpstate->xfd);
	}
}
EXPORT_SYMBOL_GPL(fpu_sync_guest_vmexit_xfd_state);
#endif /* CONFIG_X86_64 */

int fpu_swap_kvm_fpstate(struct fpu_guest *guest_fpu, bool enter_guest)
{
	struct fpstate *guest_fps = guest_fpu->fpstate;
	struct fpu *fpu = x86_task_fpu(current);
	struct fpstate *cur_fps = fpu->fpstate;

	fpregs_lock();
	if (!cur_fps->is_confidential && !test_thread_flag(TIF_NEED_FPU_LOAD))
		save_fpregs_to_fpstate(fpu);

	/* Swap fpstate */
	if (enter_guest) {
		fpu->__task_fpstate = cur_fps;
		fpu->fpstate = guest_fps;
		guest_fps->in_use = true;
	} else {
		guest_fps->in_use = false;
		fpu->fpstate = fpu->__task_fpstate;
		fpu->__task_fpstate = NULL;
	}

	cur_fps = fpu->fpstate;

	if (!cur_fps->is_confidential) {
		/* Includes XFD update */
		restore_fpregs_from_fpstate(cur_fps, XFEATURE_MASK_FPSTATE);
	} else {
		/*
		 * XSTATE is restored by firmware from encrypted
		 * memory. Make sure XFD state is correct while
		 * running with the guest fpstate.
		 */
		xfd_update_state(cur_fps);
	}

	fpregs_mark_activate();
	fpregs_unlock();
	return 0;
}
EXPORT_SYMBOL_GPL(fpu_swap_kvm_fpstate);
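
/*
 * Illustrative sketch (not part of this file): how a hypervisor brackets
 * guest execution with fpu_swap_kvm_fpstate(). The surrounding names
 * (my_vcpu_run(), enter_guest_and_run()) are hypothetical.
 */
#if 0
static int my_vcpu_run(struct my_vcpu *vcpu)
{
	int ret;

	/* Switch current's fpstate pointer to the guest fpstate. */
	fpu_swap_kvm_fpstate(&vcpu->guest_fpu, true);

	ret = enter_guest_and_run(vcpu);

	/* Restore current's own fpstate on the way out. */
	fpu_swap_kvm_fpstate(&vcpu->guest_fpu, false);

	return ret;
}
#endif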

void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf,
				    unsigned int size, u64 xfeatures, u32 pkru)
{
	struct fpstate *kstate = gfpu->fpstate;
	union fpregs_state *ustate = buf;
	struct membuf mb = { .p = buf, .left = size };

	if (cpu_feature_enabled(X86_FEATURE_XSAVE)) {
		__copy_xstate_to_uabi_buf(mb, kstate, xfeatures, pkru,
					  XSTATE_COPY_XSAVE);
	} else {
		memcpy(&ustate->fxsave, &kstate->regs.fxsave,
		       sizeof(ustate->fxsave));
		/* Make it restorable on an XSAVE enabled host */
		ustate->xsave.header.xfeatures = XFEATURE_MASK_FPSSE;
	}
}
EXPORT_SYMBOL_GPL(fpu_copy_guest_fpstate_to_uabi);

int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf,
				   u64 xcr0, u32 *vpkru)
{
	struct fpstate *kstate = gfpu->fpstate;
	const union fpregs_state *ustate = buf;

	if (!cpu_feature_enabled(X86_FEATURE_XSAVE)) {
		if (ustate->xsave.header.xfeatures & ~XFEATURE_MASK_FPSSE)
			return -EINVAL;
		if (ustate->fxsave.mxcsr & ~mxcsr_feature_mask)
			return -EINVAL;
		memcpy(&kstate->regs.fxsave, &ustate->fxsave, sizeof(ustate->fxsave));
		return 0;
	}

	if (ustate->xsave.header.xfeatures & ~xcr0)
		return -EINVAL;

	/*
	 * Nullify @vpkru to preserve its current value if PKRU's bit isn't set
	 * in the header. KVM's odd ABI is to leave PKRU untouched in this
	 * case (all other components are eventually re-initialized).
	 */
	if (!(ustate->xsave.header.xfeatures & XFEATURE_MASK_PKRU))
		vpkru = NULL;

	return copy_uabi_from_kernel_to_xstate(kstate, ustate, vpkru);
}
EXPORT_SYMBOL_GPL(fpu_copy_uabi_to_guest_fpstate);
#endif /* CONFIG_KVM */

void kernel_fpu_begin_mask(unsigned int kfpu_mask)
{
	if (!irqs_disabled())
		fpregs_lock();

	WARN_ON_FPU(!irq_fpu_usable());

	/* Toggle kernel_fpu_allowed to false: */
	WARN_ON_FPU(!this_cpu_read(kernel_fpu_allowed));
	this_cpu_write(kernel_fpu_allowed, false);

	if (!(current->flags & (PF_KTHREAD | PF_USER_WORKER)) &&
	    !test_thread_flag(TIF_NEED_FPU_LOAD)) {
		set_thread_flag(TIF_NEED_FPU_LOAD);
		save_fpregs_to_fpstate(x86_task_fpu(current));
	}
	__cpu_invalidate_fpregs_state();

	/* Put sane initial values into the control registers. */
	if (likely(kfpu_mask & KFPU_MXCSR) && boot_cpu_has(X86_FEATURE_XMM))
		ldmxcsr(MXCSR_DEFAULT);

	if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU))
		asm volatile ("fninit");
}
EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask);

void kernel_fpu_end(void)
{
	/* Toggle kernel_fpu_allowed back to true: */
	WARN_ON_FPU(this_cpu_read(kernel_fpu_allowed));
	this_cpu_write(kernel_fpu_allowed, true);

	if (!irqs_disabled())
		fpregs_unlock();
}
EXPORT_SYMBOL_GPL(kernel_fpu_end);
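
/*
 * Illustrative sketch (not part of this file): kernel_fpu_begin() is, in
 * current kernels, a thin wrapper around kernel_fpu_begin_mask() defined
 * in <asm/fpu/api.h>; it is reproduced here under that assumption to
 * clarify what the mask selects. 64-bit kernel code only needs MXCSR
 * initialized, while 32-bit kernel code may also rely on a clean x87
 * state.
 */
#if 0
static inline void kernel_fpu_begin(void)
{
#ifdef CONFIG_X86_64
	/* 64-bit kernel code never uses the legacy x87 state. */
	kernel_fpu_begin_mask(KFPU_MXCSR);
#else
	/* 32-bit kernel code may use x87 as well as SSE. */
	kernel_fpu_begin_mask(KFPU_387 | KFPU_MXCSR);
#endif
}
#endif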

/*
 * Sync the FPU register state to current's memory register state when the
 * current task owns the FPU. The hardware register state is preserved.
 */
void fpu_sync_fpstate(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != x86_task_fpu(current));

	fpregs_lock();
	trace_x86_fpu_before_save(fpu);

	if (!test_thread_flag(TIF_NEED_FPU_LOAD))
		save_fpregs_to_fpstate(fpu);

	trace_x86_fpu_after_save(fpu);
	fpregs_unlock();
}

static inline unsigned int init_fpstate_copy_size(void)
{
	if (!use_xsave())
		return fpu_kernel_cfg.default_size;

	/* XSAVE(S) just needs the legacy and the xstate header part */
	return sizeof(init_fpstate.regs.xsave);
}

static inline void fpstate_init_fxstate(struct fpstate *fpstate)
{
	fpstate->regs.fxsave.cwd = 0x37f;
	fpstate->regs.fxsave.mxcsr = MXCSR_DEFAULT;
}

/*
 * Legacy x87 fpstate state init:
 */
static inline void fpstate_init_fstate(struct fpstate *fpstate)
{
	fpstate->regs.fsave.cwd = 0xffff037fu;
	fpstate->regs.fsave.swd = 0xffff0000u;
	fpstate->regs.fsave.twd = 0xffffffffu;
	fpstate->regs.fsave.fos = 0xffff0000u;
}

/*
 * Used in two places:
 * 1) Early boot to setup init_fpstate for non XSAVE systems
 * 2) fpu_alloc_guest_fpstate() which is invoked from KVM
 */
void fpstate_init_user(struct fpstate *fpstate)
{
	if (!cpu_feature_enabled(X86_FEATURE_FPU)) {
		fpstate_init_soft(&fpstate->regs.soft);
		return;
	}

	xstate_init_xcomp_bv(&fpstate->regs.xsave, fpstate->xfeatures);

	if (cpu_feature_enabled(X86_FEATURE_FXSR))
		fpstate_init_fxstate(fpstate);
	else
		fpstate_init_fstate(fpstate);
}
126fe040 543
b0237dad 544static void __fpstate_reset(struct fpstate *fpstate, u64 xfd)
69f6ed1d
TG
545{
546 /* Initialize sizes and feature masks */
547 fpstate->size = fpu_kernel_cfg.default_size;
548 fpstate->user_size = fpu_user_cfg.default_size;
549 fpstate->xfeatures = fpu_kernel_cfg.default_features;
550 fpstate->user_xfeatures = fpu_user_cfg.default_features;
b0237dad 551 fpstate->xfd = xfd;
69f6ed1d
TG
552}
553
87d0e5be
TG
554void fpstate_reset(struct fpu *fpu)
555{
556 /* Set the fpstate pointer to the default fpstate */
557 fpu->fpstate = &fpu->__fpstate;
b0237dad 558 __fpstate_reset(fpu->fpstate, init_fpstate.xfd);
6f6a7c09
TG
559
560 /* Initialize the permission related info in fpu */
561 fpu->perm.__state_perm = fpu_kernel_cfg.default_features;
562 fpu->perm.__state_size = fpu_kernel_cfg.default_size;
563 fpu->perm.__user_state_size = fpu_user_cfg.default_size;
980fe2fd
TG
564 /* Same defaults for guests */
565 fpu->guest_perm = fpu->perm;
87d0e5be
TG
566}
567

static inline void fpu_inherit_perms(struct fpu *dst_fpu)
{
	if (fpu_state_size_dynamic()) {
		struct fpu *src_fpu = x86_task_fpu(current->group_leader);

		spin_lock_irq(&current->sighand->siglock);
		/* Fork also inherits the permissions of the parent */
		dst_fpu->perm = src_fpu->perm;
		dst_fpu->guest_perm = src_fpu->guest_perm;
		spin_unlock_irq(&current->sighand->siglock);
	}
}

/* A passed ssp of zero will not cause any update */
static int update_fpu_shstk(struct task_struct *dst, unsigned long ssp)
{
#ifdef CONFIG_X86_USER_SHADOW_STACK
	struct cet_user_state *xstate;

	/* If an ssp update is not needed, there is nothing to do. */
	if (!ssp)
		return 0;

	xstate = get_xsave_addr(&x86_task_fpu(dst)->fpstate->regs.xsave,
				XFEATURE_CET_USER);

	/*
	 * If there is a non-zero ssp, then 'dst' must be configured with a shadow
	 * stack and the fpu state should be up to date since it was just copied
	 * from the parent in fpu_clone(). So there must be a valid non-init CET
	 * state location in the buffer.
	 */
	if (WARN_ON_ONCE(!xstate))
		return 1;

	xstate->user_ssp = (u64)ssp;
#endif
	return 0;
}

/* Clone current's FPU state on fork */
int fpu_clone(struct task_struct *dst, unsigned long clone_flags, bool minimal,
	      unsigned long ssp)
{
	/*
	 * We allocate the new FPU structure right after the end of the task struct.
	 * The task allocation size already took this into account.
	 *
	 * This is safe because task_struct size is a multiple of cacheline size,
	 * thus x86_task_fpu() will always be cacheline aligned as well.
	 */
	struct fpu *dst_fpu = (void *)dst + sizeof(*dst);

	BUILD_BUG_ON(sizeof(*dst) % SMP_CACHE_BYTES != 0);

	/* The new task's FPU state cannot be valid in the hardware. */
	dst_fpu->last_cpu = -1;

	fpstate_reset(dst_fpu);

	if (!cpu_feature_enabled(X86_FEATURE_FPU))
		return 0;

	/*
	 * Enforce reload for user space tasks and prevent kernel threads
	 * from trying to save the FPU registers on context switch.
	 */
	set_tsk_thread_flag(dst, TIF_NEED_FPU_LOAD);

	/*
	 * No FPU state inheritance for kernel threads and IO
	 * worker threads.
	 */
	if (minimal) {
		/* Clear out the minimal state */
		memcpy(&dst_fpu->fpstate->regs, &init_fpstate.regs,
		       init_fpstate_copy_size());
		return 0;
	}

	/*
	 * If a new feature is added, ensure all dynamic features are
	 * caller-saved from here!
	 */
	BUILD_BUG_ON(XFEATURE_MASK_USER_DYNAMIC != XFEATURE_MASK_XTILE_DATA);

	/*
	 * Save the default portion of the current FPU state into the
	 * clone. Assume all dynamic features to be defined as caller-
	 * saved, which enables skipping both the expansion of fpstate
	 * and the copying of any dynamic state.
	 *
	 * Do not use memcpy() when TIF_NEED_FPU_LOAD is set because
	 * copying is not valid when current uses non-default states.
	 */
	fpregs_lock();
	if (test_thread_flag(TIF_NEED_FPU_LOAD))
		fpregs_restore_userregs();
	save_fpregs_to_fpstate(dst_fpu);
	fpregs_unlock();
	if (!(clone_flags & CLONE_THREAD))
		fpu_inherit_perms(dst_fpu);

	/*
	 * Children never inherit PASID state.
	 * Force it to have its init value:
	 */
	if (use_xsave())
		dst_fpu->fpstate->regs.xsave.header.xfeatures &= ~XFEATURE_MASK_PASID;

	/*
	 * Update the shadow stack pointer, in case it changed during clone.
	 */
	if (update_fpu_shstk(dst, ssp))
		return 1;

	trace_x86_fpu_copy_dst(dst_fpu);

	return 0;
}

/*
 * While struct fpu is no longer part of struct thread_struct, it is still
 * allocated after struct task_struct in the "task_struct" kmem cache. But
 * since FPU is expected to be part of struct thread_struct, we have to
 * adjust for it here.
 */
void fpu_thread_struct_whitelist(unsigned long *offset, unsigned long *size)
{
	/* The allocation follows struct task_struct. */
	*offset = sizeof(struct task_struct) - offsetof(struct task_struct, thread);
	*offset += offsetof(struct fpu, __fpstate.regs);
	*size = fpu_kernel_cfg.default_size;
}

/*
 * Drops current FPU state: deactivates the fpregs and
 * the fpstate. NOTE: it still leaves previous contents
 * in the fpregs in the eager-FPU case.
 *
 * This function can be used in cases where we know that
 * a state-restore is coming: either an explicit one,
 * or a reschedule.
 */
void fpu__drop(struct task_struct *tsk)
{
	struct fpu *fpu;

	if (test_tsk_thread_flag(tsk, TIF_NEED_FPU_LOAD))
		return;

	fpu = x86_task_fpu(tsk);

	preempt_disable();

	if (fpu == x86_task_fpu(current)) {
		/* Ignore delayed exceptions from user space */
		asm volatile("1: fwait\n"
			     "2:\n"
			     _ASM_EXTABLE(1b, 2b));
		fpregs_deactivate(fpu);
	}

	trace_x86_fpu_dropped(fpu);

	preempt_enable();
}

/*
 * Clear FPU registers by setting them up from the init fpstate.
 * Caller must do fpregs_[un]lock() around it.
 */
static inline void restore_fpregs_from_init_fpstate(u64 features_mask)
{
	if (use_xsave())
		os_xrstor(&init_fpstate, features_mask);
	else if (use_fxsr())
		fxrstor(&init_fpstate.regs.fxsave);
	else
		frstor(&init_fpstate.regs.fsave);

	pkru_write_default();
}

/*
 * Reset current->fpu memory state to the init values.
 */
static void fpu_reset_fpstate_regs(void)
{
	struct fpu *fpu = x86_task_fpu(current);

	fpregs_lock();
	__fpu_invalidate_fpregs_state(fpu);
	/*
	 * This does not change the actual hardware registers. It just
	 * resets the memory image and sets TIF_NEED_FPU_LOAD so a
	 * subsequent return to usermode will reload the registers from the
	 * task's memory image.
	 *
	 * Do not use fpstate_init() here. Just copy init_fpstate which has
	 * the correct content already except for PKRU.
	 *
	 * PKRU handling does not rely on the xstate when restoring for
	 * user space as PKRU is eagerly written in switch_to() and
	 * flush_thread().
	 */
	memcpy(&fpu->fpstate->regs, &init_fpstate.regs, init_fpstate_copy_size());
	set_thread_flag(TIF_NEED_FPU_LOAD);
	fpregs_unlock();
}

/*
 * Reset current's user FPU states to the init states. current's
 * supervisor states, if any, are not modified by this function. The
 * caller guarantees that the XSTATE header in memory is intact.
 */
void fpu__clear_user_states(struct fpu *fpu)
{
	WARN_ON_FPU(fpu != x86_task_fpu(current));

	fpregs_lock();
	if (!cpu_feature_enabled(X86_FEATURE_FPU)) {
		fpu_reset_fpstate_regs();
		fpregs_unlock();
		return;
	}

	/*
	 * Ensure that current's supervisor states are loaded into their
	 * corresponding registers.
	 */
	if (xfeatures_mask_supervisor() &&
	    !fpregs_state_valid(fpu, smp_processor_id()))
		os_xrstor_supervisor(fpu->fpstate);

	/* Reset user states in registers. */
	restore_fpregs_from_init_fpstate(XFEATURE_MASK_USER_RESTORE);

	/*
	 * Now all FPU registers have their desired values. Inform the FPU
	 * state machine that current's FPU registers are in the hardware
	 * registers. The memory image does not need to be updated because
	 * any operation relying on it has to save the registers first when
	 * current's FPU is marked active.
	 */
	fpregs_mark_activate();
	fpregs_unlock();
}

void fpu_flush_thread(void)
{
	fpstate_reset(x86_task_fpu(current));
	fpu_reset_fpstate_regs();
}

/*
 * Load FPU context before returning to userspace.
 */
void switch_fpu_return(void)
{
	if (!static_cpu_has(X86_FEATURE_FPU))
		return;

	fpregs_restore_userregs();
}
EXPORT_SYMBOL_GPL(switch_fpu_return);

void fpregs_lock_and_load(void)
{
	/*
	 * fpregs_lock() only disables preemption (mostly). So modifying state
	 * in an interrupt could screw up an in-progress fpregs operation.
	 * Warn about it.
	 */
	WARN_ON_ONCE(!irq_fpu_usable());
	WARN_ON_ONCE(current->flags & PF_KTHREAD);

	fpregs_lock();

	fpregs_assert_state_consistent();

	if (test_thread_flag(TIF_NEED_FPU_LOAD))
		fpregs_restore_userregs();
}
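
/*
 * Illustrative sketch (not part of this file): the pattern served by
 * fpregs_lock_and_load() when kernel code wants to edit current's live
 * user FPU state, e.g. a shadow-stack pointer. The helper name
 * (set_current_user_ssp()) is hypothetical.
 */
#if 0
static void set_current_user_ssp(u64 new_ssp)
{
	/*
	 * After fpregs_lock_and_load() the task's xstate is live in the
	 * hardware registers, so the update must target the register
	 * (here via MSR), not the in-memory xsave buffer, which is
	 * stale at this point.
	 */
	fpregs_lock_and_load();
	wrmsrq(MSR_IA32_PL3_SSP, new_ssp);
	fpregs_unlock();
}
#endif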

#ifdef CONFIG_X86_DEBUG_FPU
/*
 * If current FPU state according to its tracking (loaded FPU context on this
 * CPU) is not valid then we must have TIF_NEED_FPU_LOAD set so the context is
 * loaded on return to userland.
 */
void fpregs_assert_state_consistent(void)
{
	struct fpu *fpu = x86_task_fpu(current);

	if (test_thread_flag(TIF_NEED_FPU_LOAD))
		return;

	WARN_ON_FPU(!fpregs_state_valid(fpu, smp_processor_id()));
}
EXPORT_SYMBOL_GPL(fpregs_assert_state_consistent);
#endif

void fpregs_mark_activate(void)
{
	struct fpu *fpu = x86_task_fpu(current);

	fpregs_activate(fpu);
	fpu->last_cpu = smp_processor_id();
	clear_thread_flag(TIF_NEED_FPU_LOAD);
}

/*
 * x87 math exception handling:
 */

int fpu__exception_code(struct fpu *fpu, int trap_nr)
{
	int err;

	if (trap_nr == X86_TRAP_MF) {
		unsigned short cwd, swd;
		/*
		 * (~cwd & swd) will mask out exceptions that are not set to unmasked
		 * status. 0x3f is the exception bits in these regs, 0x200 is the
		 * C1 reg you need in case of a stack fault, 0x040 is the stack
		 * fault bit. We should only be taking one exception at a time,
		 * so if this combination doesn't produce any single exception,
		 * then we have a bad program that isn't synchronizing its FPU usage
		 * and it will suffer the consequences since we won't be able to
		 * fully reproduce the context of the exception.
		 */
		if (boot_cpu_has(X86_FEATURE_FXSR)) {
			cwd = fpu->fpstate->regs.fxsave.cwd;
			swd = fpu->fpstate->regs.fxsave.swd;
		} else {
			cwd = (unsigned short)fpu->fpstate->regs.fsave.cwd;
			swd = (unsigned short)fpu->fpstate->regs.fsave.swd;
		}

		err = swd & ~cwd;
	} else {
		/*
		 * The SIMD FPU exceptions are handled a little differently, as there
		 * is only a single status/control register. Thus, to determine which
		 * unmasked exception was caught we must mask the exception mask bits
		 * at 0x1f80, and then use these to mask the exception bits at 0x3f.
		 */
		unsigned short mxcsr = MXCSR_DEFAULT;

		if (boot_cpu_has(X86_FEATURE_XMM))
			mxcsr = fpu->fpstate->regs.fxsave.mxcsr;

		err = ~(mxcsr >> 7) & mxcsr;
	}

	if (err & 0x001) {	/* Invalid op */
		/*
		 * swd & 0x240 == 0x040: Stack Underflow
		 * swd & 0x240 == 0x240: Stack Overflow
		 * User must clear the SF bit (0x40) if set
		 */
		return FPE_FLTINV;
	} else if (err & 0x004) { /* Divide by Zero */
		return FPE_FLTDIV;
	} else if (err & 0x008) { /* Overflow */
		return FPE_FLTOVF;
	} else if (err & 0x012) { /* Denormal, Underflow */
		return FPE_FLTUND;
	} else if (err & 0x020) { /* Precision */
		return FPE_FLTRES;
	}

	/*
	 * If we're using IRQ 13, or supposedly even some trap
	 * X86_TRAP_MF implementations, it's possible
	 * we get a spurious trap, which is not an error.
	 */
	return 0;
}
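
/*
 * Worked example for the #MF path above (a sketch, not live code): with
 * the divide-by-zero exception unmasked in the control word (ZM bit
 * 0x004 cleared, so cwd = 0x37b) and the corresponding status bit set
 * after an x87 division by zero (swd ZE bit 0x004 set):
 *
 *	err = swd & ~cwd = 0x004 & ~0x37b  ->  bit 0x004 is set
 *
 * so fpu__exception_code() returns FPE_FLTDIV. With the default cwd of
 * 0x37f all six exception bits are masked, err has none of them set, and
 * no signal code is derived.
 */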

/*
 * Initialize register state that may prevent the CPU from entering a
 * low-power idle state. This function will be invoked from the cpuidle
 * driver only when needed.
 */
noinstr void fpu_idle_fpregs(void)
{
	/* Note: AMX_TILE being enabled implies XGETBV1 support */
	if (cpu_feature_enabled(X86_FEATURE_AMX_TILE) &&
	    (xfeatures_in_use() & XFEATURE_MASK_XTILE)) {
		tile_release();
		__this_cpu_write(fpu_fpregs_owner_ctx, NULL);
	}
}