// SPDX-License-Identifier: GPL-2.0-only
/*
 * xsave/xrstor support.
 *
 * Author: Suresh Siddha <suresh.b.siddha@intel.com>
 */
#include <linux/compat.h>
#include <linux/cpu.h>
#include <linux/mman.h>
#include <linux/pkeys.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>

#include <asm/fpu/api.h>
#include <asm/fpu/internal.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/regset.h>
#include <asm/fpu/xstate.h>

#include <asm/tlbflush.h>
#include <asm/cpufeature.h>
/*
 * Although we spell it out in here, the Processor Trace
 * xfeature is completely unused. We use other mechanisms
 * to save/restore PT state in Linux.
 */
static const char *xfeature_names[] =
{
	"x87 floating point registers"	,
	"SSE registers"			,
	"AVX registers"			,
	"MPX bounds registers"		,
	"MPX CSR"			,
	"AVX-512 opmask"		,
	"AVX-512 Hi256"			,
	"AVX-512 ZMM_Hi256"		,
	"Processor Trace (unused)"	,
	"Protection Keys User registers",
	"unknown xstate feature"	,
};
static short xsave_cpuid_features[] __initdata = {
	X86_FEATURE_FPU,
	X86_FEATURE_XMM,
	X86_FEATURE_AVX,
	X86_FEATURE_MPX,
	X86_FEATURE_MPX,
	X86_FEATURE_AVX512F,
	X86_FEATURE_AVX512F,
	X86_FEATURE_AVX512F,
	X86_FEATURE_INTEL_PT,
	X86_FEATURE_PKU,
};
/*
 * This represents the full set of bits that should ever be set in a kernel
 * XSAVE buffer, both supervisor and user xstates.
 */
u64 xfeatures_mask_all __read_mostly;
static unsigned int xstate_offsets[XFEATURE_MAX] = { [ 0 ... XFEATURE_MAX - 1] = -1};
static unsigned int xstate_sizes[XFEATURE_MAX] = { [ 0 ... XFEATURE_MAX - 1] = -1};
static unsigned int xstate_comp_offsets[XFEATURE_MAX] = { [ 0 ... XFEATURE_MAX - 1] = -1};
static unsigned int xstate_supervisor_only_offsets[XFEATURE_MAX] = { [ 0 ... XFEATURE_MAX - 1] = -1};
/*
 * The kernel's XSAVE area can be in standard or compacted format;
 * it is always in standard format for user mode. This is the user
 * mode standard format size used for signal and ptrace frames.
 */
unsigned int fpu_user_xstate_size;
/*
 * Return whether the system supports a given xfeature.
 *
 * Also return the name of the (most advanced) feature that the caller requested:
 */
int cpu_has_xfeatures(u64 xfeatures_needed, const char **feature_name)
{
	u64 xfeatures_missing = xfeatures_needed & ~xfeatures_mask_all;

	if (unlikely(feature_name)) {
		long xfeature_idx, max_idx;
		u64 xfeatures_print;
		/*
		 * We use FLS here to be able to print the most advanced
		 * feature that was requested but is missing. So if a driver
		 * asks about "XFEATURE_MASK_SSE | XFEATURE_MASK_YMM" we'll print the
		 * missing AVX feature - this is the most informative message
		 * to users:
		 */
		if (xfeatures_missing)
			xfeatures_print = xfeatures_missing;
		else
			xfeatures_print = xfeatures_needed;

		xfeature_idx = fls64(xfeatures_print)-1;
		max_idx = ARRAY_SIZE(xfeature_names)-1;
		xfeature_idx = min(xfeature_idx, max_idx);

		*feature_name = xfeature_names[xfeature_idx];
	}

	if (xfeatures_missing)
		return 0;

	return 1;
}
EXPORT_SYMBOL_GPL(cpu_has_xfeatures);
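
/*
 * Example (illustrative sketch, not a caller in this file): a driver
 * that needs AVX state can probe for it and log the most advanced
 * missing feature by name:
 *
 *	const char *name;
 *
 *	if (!cpu_has_xfeatures(XFEATURE_MASK_SSE | XFEATURE_MASK_YMM, &name)) {
 *		pr_info("AVX unusable, missing xfeature: '%s'\n", name);
 *		return -ENODEV;
 *	}
 */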
static bool xfeature_is_supervisor(int xfeature_nr)
{
	/*
	 * Extended State Enumeration Sub-leaves (EAX = 0DH, ECX = n, n > 1)
	 * return ECX[0] set to (1) for a supervisor state, and cleared (0)
	 * for a user state.
	 */
	u32 eax, ebx, ecx, edx;

	cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
	return ecx & 1;
}
/*
 * When executing XSAVEOPT (or other optimized XSAVE instructions), if
 * a processor implementation detects that an FPU state component is still
 * (or is again) in its initialized state, it may clear the corresponding
 * bit in the header.xfeatures field, and can skip the writeout of registers
 * to the corresponding memory layout.
 *
 * This means that when the bit is zero, the state component might still contain
 * some previous, non-initialized register state.
 *
 * Before writing xstate information to user-space we sanitize those components,
 * to always ensure that the memory layout of a feature will be in the init state
 * if the corresponding header bit is zero. This is to ensure that user-space doesn't
 * see some stale state in the memory layout during signal handling, debugging etc.
 */
void fpstate_sanitize_xstate(struct fpu *fpu)
{
	struct fxregs_state *fx = &fpu->state.fxsave;
	int feature_bit;
	u64 xfeatures;

	if (!use_xsaveopt())
		return;

	xfeatures = fpu->state.xsave.header.xfeatures;

	/*
	 * None of the feature bits are in init state. So nothing else
	 * to do for us, as the memory layout is up to date.
	 */
	if ((xfeatures & xfeatures_mask_all) == xfeatures_mask_all)
		return;

	/*
	 * FP is in init state
	 */
	if (!(xfeatures & XFEATURE_MASK_FP)) {
		fx->cwd = 0x37f;
		fx->swd = 0;
		fx->twd = 0;
		fx->fop = 0;
		fx->rip = 0;
		fx->rdp = 0;
		memset(&fx->st_space[0], 0, 128);
	}

	/*
	 * SSE is in init state
	 */
	if (!(xfeatures & XFEATURE_MASK_SSE))
		memset(&fx->xmm_space[0], 0, 256);

	/*
	 * First two features are FPU and SSE, which above we handled
	 * in a special way already:
	 */
	feature_bit = 0x2;
	xfeatures = (xfeatures_mask_user() & ~xfeatures) >> 2;

	/*
	 * Update all the remaining memory layouts according to their
	 * standard xstate layout, if their header bit is in the init
	 * state:
	 */
	while (xfeatures) {
		if (xfeatures & 0x1) {
			int offset = xstate_comp_offsets[feature_bit];
			int size = xstate_sizes[feature_bit];

			memcpy((void *)fx + offset,
			       (void *)&init_fpstate.xsave + offset,
			       size);
		}

		xfeatures >>= 1;
		feature_bit++;
	}
}
/*
 * Enable the extended processor state save/restore feature.
 * Called once per CPU onlining.
 */
void fpu__init_cpu_xstate(void)
{
	u64 unsup_bits;

	if (!boot_cpu_has(X86_FEATURE_XSAVE) || !xfeatures_mask_all)
		return;
	/*
	 * Unsupported supervisor xstates should not be found in
	 * the xfeatures mask.
	 */
	unsup_bits = xfeatures_mask_all & XFEATURE_MASK_SUPERVISOR_UNSUPPORTED;
	WARN_ONCE(unsup_bits, "x86/fpu: Found unsupported supervisor xstates: 0x%llx\n",
		  unsup_bits);

	xfeatures_mask_all &= ~XFEATURE_MASK_SUPERVISOR_UNSUPPORTED;

	cr4_set_bits(X86_CR4_OSXSAVE);

	/*
	 * XCR_XFEATURE_ENABLED_MASK (aka. XCR0) sets user features
	 * managed by XSAVE{C, OPT, S} and XRSTOR{S}. Only XSAVE user
	 * states can be set here.
	 */
	xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask_user());

	/*
	 * MSR_IA32_XSS sets supervisor states managed by XSAVES.
	 */
	if (boot_cpu_has(X86_FEATURE_XSAVES))
		wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor());
}
static bool xfeature_enabled(enum xfeature xfeature)
{
	return xfeatures_mask_all & BIT_ULL(xfeature);
}
/*
 * Record the offsets and sizes of various xstates contained
 * in the XSAVE state memory layout.
 */
static void __init setup_xstate_features(void)
{
	u32 eax, ebx, ecx, edx, i;
	/* start at the beginning of the "extended state" */
	unsigned int last_good_offset = offsetof(struct xregs_state,
						 extended_state_area);
	/*
	 * The FP xstates and SSE xstates are legacy states. They are always
	 * in the fixed offsets in the xsave area in either compacted form
	 * or standard form.
	 */
	xstate_offsets[XFEATURE_FP]	= 0;
	xstate_sizes[XFEATURE_FP]	= offsetof(struct fxregs_state,
						   xmm_space);

	xstate_offsets[XFEATURE_SSE]	= xstate_sizes[XFEATURE_FP];
	xstate_sizes[XFEATURE_SSE]	= sizeof_field(struct fxregs_state,
						       xmm_space);

	for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
		if (!xfeature_enabled(i))
			continue;

		cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);

		xstate_sizes[i] = eax;

		/*
		 * If an xfeature is supervisor state, the offset in EBX is
		 * invalid, leave it to -1.
		 */
		if (xfeature_is_supervisor(i))
			continue;

		xstate_offsets[i] = ebx;

		/*
		 * In our xstate size checks, we assume that the highest-numbered
		 * xstate feature has the highest offset in the buffer. Ensure
		 * it does.
		 */
		WARN_ONCE(last_good_offset > xstate_offsets[i],
			  "x86/fpu: misordered xstate at %d\n", last_good_offset);

		last_good_offset = xstate_offsets[i];
	}
}
static void __init print_xstate_feature(u64 xstate_mask)
{
	const char *feature_name;

	if (cpu_has_xfeatures(xstate_mask, &feature_name))
		pr_info("x86/fpu: Supporting XSAVE feature 0x%03Lx: '%s'\n", xstate_mask, feature_name);
}
/*
 * Print out all the supported xstate features:
 */
static void __init print_xstate_features(void)
{
	print_xstate_feature(XFEATURE_MASK_FP);
	print_xstate_feature(XFEATURE_MASK_SSE);
	print_xstate_feature(XFEATURE_MASK_YMM);
	print_xstate_feature(XFEATURE_MASK_BNDREGS);
	print_xstate_feature(XFEATURE_MASK_BNDCSR);
	print_xstate_feature(XFEATURE_MASK_OPMASK);
	print_xstate_feature(XFEATURE_MASK_ZMM_Hi256);
	print_xstate_feature(XFEATURE_MASK_Hi16_ZMM);
	print_xstate_feature(XFEATURE_MASK_PKRU);
}
/*
 * This check is important because it is easy to get XSTATE_*
 * confused with XSTATE_BIT_*.
 */
#define CHECK_XFEATURE(nr) do {			\
	WARN_ON(nr < FIRST_EXTENDED_XFEATURE);	\
	WARN_ON(nr >= XFEATURE_MAX);		\
} while (0)
/*
 * We could cache this like xstate_size[], but we only use
 * it here, so it would be a waste of space.
 */
static int xfeature_is_aligned(int xfeature_nr)
{
	u32 eax, ebx, ecx, edx;

	CHECK_XFEATURE(xfeature_nr);

	if (!xfeature_enabled(xfeature_nr)) {
		WARN_ONCE(1, "Checking alignment of disabled xfeature %d\n",
			  xfeature_nr);
		return 0;
	}

	cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
	/*
	 * The value returned by ECX[1] indicates the alignment
	 * of state component 'i' when the compacted format
	 * of the extended region of an XSAVE area is used:
	 */
	return !!(ecx & 2);
}
/*
 * This function sets up offsets and sizes of all extended states in
 * the xsave area. This supports both standard format and compacted format
 * of the xsave area.
 */
static void __init setup_xstate_comp_offsets(void)
{
	unsigned int next_offset;
	int i;

	/*
	 * The FP xstates and SSE xstates are legacy states. They are always
	 * in the fixed offsets in the xsave area in either compacted form
	 * or standard form.
	 */
	xstate_comp_offsets[XFEATURE_FP] = 0;
	xstate_comp_offsets[XFEATURE_SSE] = offsetof(struct fxregs_state,
						     xmm_space);

	if (!boot_cpu_has(X86_FEATURE_XSAVES)) {
		for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
			if (xfeature_enabled(i))
				xstate_comp_offsets[i] = xstate_offsets[i];
		}
		return;
	}

	next_offset = FXSAVE_SIZE + XSAVE_HDR_SIZE;

	for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
		if (!xfeature_enabled(i))
			continue;

		if (xfeature_is_aligned(i))
			next_offset = ALIGN(next_offset, 64);

		xstate_comp_offsets[i] = next_offset;
		next_offset += xstate_sizes[i];
	}
}
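
/*
 * Worked example (assuming XCR0 = 0x7, i.e. FP | SSE | YMM, with XSAVES
 * available): the compacted extended region starts right after the
 * legacy area and the header, so the loop above computes:
 *
 *	next_offset = FXSAVE_SIZE + XSAVE_HDR_SIZE;	// 512 + 64 = 576
 *	xstate_comp_offsets[XFEATURE_YMM] = 576;
 *	next_offset += xstate_sizes[XFEATURE_YMM];	// + 256 = 832
 *
 * A higher-numbered enabled feature would then land at 832, or at the
 * next 64-byte boundary if it requires alignment.
 */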
/*
 * Setup offsets of a supervisor-state-only XSAVES buffer:
 *
 * The offsets stored in xstate_comp_offsets[] only work for one specific
 * value of the Requested Feature BitMap (RFBM). In cases where a different
 * RFBM value is used, a different set of offsets is required. This set of
 * offsets is for when RFBM=xfeatures_mask_supervisor().
 */
static void __init setup_supervisor_only_offsets(void)
{
	unsigned int next_offset;
	int i;

	next_offset = FXSAVE_SIZE + XSAVE_HDR_SIZE;

	for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
		if (!xfeature_enabled(i) || !xfeature_is_supervisor(i))
			continue;

		if (xfeature_is_aligned(i))
			next_offset = ALIGN(next_offset, 64);

		xstate_supervisor_only_offsets[i] = next_offset;
		next_offset += xstate_sizes[i];
	}
}
/*
 * Print out xstate component offsets and sizes
 */
static void __init print_xstate_offset_size(void)
{
	int i;

	for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
		if (!xfeature_enabled(i))
			continue;
		pr_info("x86/fpu: xstate_offset[%d]: %4d, xstate_sizes[%d]: %4d\n",
			i, xstate_comp_offsets[i], i, xstate_sizes[i]);
	}
}
/*
 * Set up the xstate image representing the init state
 */
static void __init setup_init_fpu_buf(void)
{
	static int on_boot_cpu __initdata = 1;

	WARN_ON_FPU(!on_boot_cpu);
	on_boot_cpu = 0;

	if (!boot_cpu_has(X86_FEATURE_XSAVE))
		return;

	setup_xstate_features();
	print_xstate_features();

	if (boot_cpu_has(X86_FEATURE_XSAVES))
		init_fpstate.xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT |
						     xfeatures_mask_all;

	/*
	 * Init all the features state with header.xfeatures being 0x0
	 */
	copy_kernel_to_xregs_booting(&init_fpstate.xsave);

	/*
	 * Dump the init state again. This is to identify the init state
	 * of any feature which is not represented by all zeros.
	 */
	copy_xregs_to_kernel_booting(&init_fpstate.xsave);
}
static int xfeature_uncompacted_offset(int xfeature_nr)
{
	u32 eax, ebx, ecx, edx;

	/*
	 * Only XSAVES supports supervisor states and it uses compacted
	 * format. Checking a supervisor state's uncompacted offset is
	 * an error.
	 */
	if (XFEATURE_MASK_SUPERVISOR_ALL & BIT_ULL(xfeature_nr)) {
		WARN_ONCE(1, "No fixed offset for xstate %d\n", xfeature_nr);
		return -1;
	}

	CHECK_XFEATURE(xfeature_nr);
	cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
	return ebx;
}

static int xfeature_size(int xfeature_nr)
{
	u32 eax, ebx, ecx, edx;

	CHECK_XFEATURE(xfeature_nr);
	cpuid_count(XSTATE_CPUID, xfeature_nr, &eax, &ebx, &ecx, &edx);
	return eax;
}
/*
 * 'XSAVES' implies two different things:
 * 1. saving of supervisor/system state
 * 2. using the compacted format
 *
 * Use this function when dealing with the compacted format so
 * that it is obvious which aspect of 'XSAVES' is being handled
 * by the calling code.
 */
int using_compacted_format(void)
{
	return boot_cpu_has(X86_FEATURE_XSAVES);
}
/* Validate an xstate header supplied by userspace (ptrace or sigreturn) */
int validate_user_xstate_header(const struct xstate_header *hdr)
{
	/* No unknown or supervisor features may be set */
	if (hdr->xfeatures & ~xfeatures_mask_user())
		return -EINVAL;

	/* Userspace must use the uncompacted format */
	if (hdr->xcomp_bv)
		return -EINVAL;

	/*
	 * If 'reserved' is shrunken to add a new field, make sure to validate
	 * that new field here!
	 */
	BUILD_BUG_ON(sizeof(hdr->reserved) != 48);

	/* No reserved bits may be set */
	if (memchr_inv(hdr->reserved, 0, sizeof(hdr->reserved)))
		return -EINVAL;

	return 0;
}
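
/*
 * Illustrative sketch: a header that passes this validation looks like
 * one written by a standard-format (non-compacted) XSAVE, e.g.:
 *
 *	hdr.xfeatures = XFEATURE_MASK_FP | XFEATURE_MASK_SSE;	// user bits only
 *	hdr.xcomp_bv  = 0;					// standard format
 *	memset(hdr.reserved, 0, sizeof(hdr.reserved));
 *
 * Setting any supervisor bit in xfeatures, any bit in xcomp_bv, or any
 * reserved byte makes this function return -EINVAL.
 */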
static void __xstate_dump_leaves(void)
{
	int i;
	u32 eax, ebx, ecx, edx;
	static int should_dump = 1;

	if (!should_dump)
		return;
	should_dump = 0;
	/*
	 * Dump out a few leaves past the ones that we support
	 * just in case there are some goodies up there
	 */
	for (i = 0; i < XFEATURE_MAX + 10; i++) {
		cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);
		pr_warn("CPUID[%02x, %02x]: eax=%08x ebx=%08x ecx=%08x edx=%08x\n",
			XSTATE_CPUID, i, eax, ebx, ecx, edx);
	}
}
#define XSTATE_WARN_ON(x) do {							\
	if (WARN_ONCE(x, "XSAVE consistency problem, dumping leaves")) {	\
		__xstate_dump_leaves();						\
	}									\
} while (0)

#define XCHECK_SZ(sz, nr, nr_macro, __struct) do {			\
	if ((nr == nr_macro) &&						\
	    WARN_ONCE(sz != sizeof(__struct),				\
		"%s: struct is %zu bytes, cpu state %d bytes\n",	\
		__stringify(nr_macro), sizeof(__struct), sz)) {		\
		__xstate_dump_leaves();					\
	}								\
} while (0)
/*
 * We have a C struct for each 'xstate'. We need to ensure
 * that our software representation matches what the CPU
 * tells us about the state's size.
 */
static void check_xstate_against_struct(int nr)
{
	/*
	 * Ask the CPU for the size of the state.
	 */
	int sz = xfeature_size(nr);
	/*
	 * Match each CPU state with the corresponding software
	 * structure.
	 */
	XCHECK_SZ(sz, nr, XFEATURE_YMM,       struct ymmh_struct);
	XCHECK_SZ(sz, nr, XFEATURE_BNDREGS,   struct mpx_bndreg_state);
	XCHECK_SZ(sz, nr, XFEATURE_BNDCSR,    struct mpx_bndcsr_state);
	XCHECK_SZ(sz, nr, XFEATURE_OPMASK,    struct avx_512_opmask_state);
	XCHECK_SZ(sz, nr, XFEATURE_ZMM_Hi256, struct avx_512_zmm_uppers_state);
	XCHECK_SZ(sz, nr, XFEATURE_Hi16_ZMM,  struct avx_512_hi16_state);
	XCHECK_SZ(sz, nr, XFEATURE_PKRU,      struct pkru_state);

	/*
	 * Make *SURE* to add any feature numbers in below if
	 * there are "holes" in the xsave state component
	 * numbers.
	 */
	if ((nr < XFEATURE_YMM) ||
	    (nr >= XFEATURE_MAX) ||
	    (nr == XFEATURE_PT_UNIMPLEMENTED_SO_FAR)) {
		WARN_ONCE(1, "no structure for xstate: %d\n", nr);
		XSTATE_WARN_ON(1);
	}
}
/*
 * This essentially double-checks what the cpu told us about
 * how large the XSAVE buffer needs to be. We are recalculating
 * it to be safe.
 */
static void do_extra_xstate_size_checks(void)
{
	int paranoid_xstate_size = FXSAVE_SIZE + XSAVE_HDR_SIZE;
	int i;

	for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
		if (!xfeature_enabled(i))
			continue;

		check_xstate_against_struct(i);
		/*
		 * Supervisor state components can be managed only by
		 * XSAVES, which is compacted-format only.
		 */
		if (!using_compacted_format())
			XSTATE_WARN_ON(xfeature_is_supervisor(i));

		/* Align from the end of the previous feature */
		if (xfeature_is_aligned(i))
			paranoid_xstate_size = ALIGN(paranoid_xstate_size, 64);
		/*
		 * The offset of a given state in the non-compacted
		 * format is given to us in a CPUID leaf. We check
		 * them for being ordered (increasing offsets) in
		 * setup_xstate_features().
		 */
		if (!using_compacted_format())
			paranoid_xstate_size = xfeature_uncompacted_offset(i);
		/*
		 * The compacted-format offset always depends on where
		 * the previous state ended.
		 */
		paranoid_xstate_size += xfeature_size(i);
	}
	XSTATE_WARN_ON(paranoid_xstate_size != fpu_kernel_xstate_size);
}
/*
 * Get total size of enabled xstates in XCR0 | IA32_XSS.
 *
 * Note the SDM's wording here. "sub-function 0" only enumerates
 * the size of the *user* states. If we use it to size a buffer
 * that we use 'XSAVES' on, we could potentially overflow the
 * buffer because 'XSAVES' saves system states too.
 */
static unsigned int __init get_xsaves_size(void)
{
	unsigned int eax, ebx, ecx, edx;
	/*
	 * - CPUID function 0DH, sub-function 1:
	 *    EBX enumerates the size (in bytes) required by
	 *    the XSAVES instruction for an XSAVE area
	 *    containing all the state components
	 *    corresponding to bits currently set in
	 *    XCR0 | IA32_XSS.
	 */
	cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
	return ebx;
}

static unsigned int __init get_xsave_size(void)
{
	unsigned int eax, ebx, ecx, edx;
	/*
	 * - CPUID function 0DH, sub-function 0:
	 *    EBX enumerates the size (in bytes) required by
	 *    the XSAVE instruction for an XSAVE area
	 *    containing all the *user* state components
	 *    corresponding to bits currently set in XCR0.
	 */
	cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
	return ebx;
}
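
/*
 * Worked example (assuming XCR0 = 0x7 and no supervisor states): the
 * highest user state is YMM at uncompacted offset 576 with size 256,
 * so CPUID(0DH, 0).EBX reports 576 + 256 = 832 bytes. Once supervisor
 * states are enabled in IA32_XSS, CPUID(0DH, 1).EBX grows accordingly,
 * which is why XSAVES buffers must be sized from sub-function 1.
 */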
/*
 * Will the runtime-enumerated 'xstate_size' fit in the init
 * task's statically-allocated buffer?
 */
static bool is_supported_xstate_size(unsigned int test_xstate_size)
{
	if (test_xstate_size <= sizeof(union fpregs_state))
		return true;

	pr_warn("x86/fpu: xstate buffer too small (%zu < %d), disabling xsave\n",
		sizeof(union fpregs_state), test_xstate_size);
	return false;
}
static int __init init_xstate_size(void)
{
	/* Recompute the context size for enabled features: */
	unsigned int possible_xstate_size;
	unsigned int xsave_size;

	xsave_size = get_xsave_size();

	if (boot_cpu_has(X86_FEATURE_XSAVES))
		possible_xstate_size = get_xsaves_size();
	else
		possible_xstate_size = xsave_size;

	/* Ensure we have the space to store all enabled features: */
	if (!is_supported_xstate_size(possible_xstate_size))
		return -EINVAL;

	/*
	 * The size is OK, we are definitely going to use xsave,
	 * make it known to the world that we need more space.
	 */
	fpu_kernel_xstate_size = possible_xstate_size;
	do_extra_xstate_size_checks();

	/*
	 * User space is always in standard format.
	 */
	fpu_user_xstate_size = xsave_size;
	return 0;
}
/*
 * We enabled the XSAVE hardware, but something went wrong and
 * we cannot use it. Disable it.
 */
static void fpu__init_disable_system_xstate(void)
{
	xfeatures_mask_all = 0;
	cr4_clear_bits(X86_CR4_OSXSAVE);
	setup_clear_cpu_cap(X86_FEATURE_XSAVE);
}
/*
 * Enable and initialize the xsave feature.
 * Called once per system bootup.
 */
void __init fpu__init_system_xstate(void)
{
	unsigned int eax, ebx, ecx, edx;
	static int on_boot_cpu __initdata = 1;
	int err;
	int i;

	WARN_ON_FPU(!on_boot_cpu);
	on_boot_cpu = 0;

	if (!boot_cpu_has(X86_FEATURE_FPU)) {
		pr_info("x86/fpu: No FPU detected\n");
		return;
	}

	if (!boot_cpu_has(X86_FEATURE_XSAVE)) {
		pr_info("x86/fpu: x87 FPU will use %s\n",
			boot_cpu_has(X86_FEATURE_FXSR) ? "FXSAVE" : "FSAVE");
		return;
	}

	if (boot_cpu_data.cpuid_level < XSTATE_CPUID) {
		WARN_ON_FPU(1);
		return;
	}

	/*
	 * Find user xstates supported by the processor.
	 */
	cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
	xfeatures_mask_all = eax + ((u64)edx << 32);

	/*
	 * Find supervisor xstates supported by the processor.
	 */
	cpuid_count(XSTATE_CPUID, 1, &eax, &ebx, &ecx, &edx);
	xfeatures_mask_all |= ecx + ((u64)edx << 32);

	if ((xfeatures_mask_user() & XFEATURE_MASK_FPSSE) != XFEATURE_MASK_FPSSE) {
		/*
		 * This indicates that something really unexpected happened
		 * with the enumeration. Disable XSAVE and try to continue
		 * booting without it. This is too early to BUG().
		 */
		pr_err("x86/fpu: FP/SSE not present amongst the CPU's xstate features: 0x%llx.\n",
		       xfeatures_mask_all);
		goto out_disable;
	}

	/*
	 * Clear XSAVE features that are disabled in the normal CPUID.
	 */
	for (i = 0; i < ARRAY_SIZE(xsave_cpuid_features); i++) {
		if (!boot_cpu_has(xsave_cpuid_features[i]))
			xfeatures_mask_all &= ~BIT_ULL(i);
	}

	xfeatures_mask_all &= fpu__get_supported_xfeatures_mask();

	/* Enable xstate instructions to be able to continue with initialization: */
	fpu__init_cpu_xstate();
	err = init_xstate_size();
	if (err)
		goto out_disable;

	/*
	 * Update info used for ptrace frames; use standard-format size and no
	 * supervisor xstates:
	 */
	update_regset_xstate_info(fpu_user_xstate_size, xfeatures_mask_user());

	fpu__init_prepare_fx_sw_frame();
	setup_init_fpu_buf();
	setup_xstate_comp_offsets();
	setup_supervisor_only_offsets();
	print_xstate_offset_size();

	pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n",
		xfeatures_mask_all,
		fpu_kernel_xstate_size,
		boot_cpu_has(X86_FEATURE_XSAVES) ? "compacted" : "standard");
	return;

out_disable:
	/* something went wrong, try to boot without any XSAVE support */
	fpu__init_disable_system_xstate();
}
/*
 * Restore minimal FPU state after suspend:
 */
void fpu__resume_cpu(void)
{
	/*
	 * Restore XCR0 on xsave capable CPUs:
	 */
	if (boot_cpu_has(X86_FEATURE_XSAVE))
		xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask_user());

	/*
	 * Restore IA32_XSS. The same CPUID bit enumerates support
	 * of XSAVES and MSR_IA32_XSS.
	 */
	if (boot_cpu_has(X86_FEATURE_XSAVES))
		wrmsrl(MSR_IA32_XSS, xfeatures_mask_supervisor());
}
/*
 * Given an xstate feature nr, calculate where in the xsave
 * buffer the state is. Callers should ensure that the buffer
 * is valid.
 */
static void *__raw_xsave_addr(struct xregs_state *xsave, int xfeature_nr)
{
	if (!xfeature_enabled(xfeature_nr)) {
		WARN_ON_FPU(1);
		return NULL;
	}

	return (void *)xsave + xstate_comp_offsets[xfeature_nr];
}
/*
 * Given the xsave area and a state inside, this function returns the
 * address of the state.
 *
 * This is the API that is called to get xstate address in either
 * standard format or compacted format of xsave area.
 *
 * Note that if there is no data for the field in the xsave buffer
 * this will return NULL.
 *
 * Inputs:
 *	xstate: the thread's storage area for all FPU data
 *	xfeature_nr: state which is defined in xsave.h (e.g. XFEATURE_FP,
 *	XFEATURE_SSE, etc...)
 * Output:
 *	address of the state in the xsave area, or NULL if the
 *	field is not present in the xsave buffer.
 */
void *get_xsave_addr(struct xregs_state *xsave, int xfeature_nr)
{
	/*
	 * Do we even *have* xsave state?
	 */
	if (!boot_cpu_has(X86_FEATURE_XSAVE))
		return NULL;

	/*
	 * We should not ever be requesting features that we
	 * have not enabled.
	 */
	WARN_ONCE(!(xfeatures_mask_all & BIT_ULL(xfeature_nr)),
		  "get of unsupported state");
	/*
	 * This assumes the last 'xsave*' instruction to
	 * have requested that 'xfeature_nr' be saved.
	 * If it did not, we might be seeing an old value
	 * of the field in the buffer.
	 *
	 * This can happen because the last 'xsave' did not
	 * request that this feature be saved (unlikely)
	 * or because the "init optimization" caused it
	 * to not be saved.
	 */
	if (!(xsave->header.xfeatures & BIT_ULL(xfeature_nr)))
		return NULL;

	return __raw_xsave_addr(xsave, xfeature_nr);
}
EXPORT_SYMBOL_GPL(get_xsave_addr);
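
/*
 * Example (illustrative sketch of a typical caller; 'fpu' is assumed to
 * be a task's struct fpu): reading a task's PKRU value from its saved
 * FPU state:
 *
 *	struct pkru_state *pk;
 *
 *	pk = get_xsave_addr(&fpu->state.xsave, XFEATURE_PKRU);
 *	if (pk)
 *		pkru_val = pk->pkru;
 *	else
 *		pkru_val = 0;	// PKRU is in its init state
 */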
/*
 * This wraps up the common operations that need to occur when retrieving
 * data from xsave state. It first ensures that the current task was
 * using the FPU and retrieves the data in to a buffer. It then calculates
 * the offset of the requested field in the buffer.
 *
 * This function is safe to call whether the FPU is in use or not.
 *
 * Note that this only works on the current task.
 *
 * Inputs:
 *	@xfeature_nr: state which is defined in xsave.h (e.g. XFEATURE_FP,
 *	XFEATURE_SSE, etc...)
 * Output:
 *	address of the state in the xsave area or NULL if the state
 *	is not present or is in its 'init state'.
 */
const void *get_xsave_field_ptr(int xfeature_nr)
{
	struct fpu *fpu = &current->thread.fpu;

	/*
	 * fpu__save() takes the CPU's xstate registers
	 * and saves them off to the 'fpu' memory buffer.
	 */
	fpu__save(fpu);

	return get_xsave_addr(&fpu->state.xsave, xfeature_nr);
}
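
/*
 * Example (illustrative sketch): fetching the current task's BNDCSR
 * state, in the style of the old MPX fault-handling code:
 *
 *	const struct mpx_bndcsr *bndcsr;
 *
 *	bndcsr = get_xsave_field_ptr(XFEATURE_BNDCSR);
 *	if (!bndcsr)
 *		return;		// state not in use or in its init state
 */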
#ifdef CONFIG_ARCH_HAS_PKEYS

/*
 * This will go out and modify the PKRU register to set the access
 * rights for @pkey to @init_val.
 */
int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
			      unsigned long init_val)
{
	u32 old_pkru;
	int pkey_shift = (pkey * PKRU_BITS_PER_PKEY);
	u32 new_pkru_bits = 0;

	/*
	 * This check implies XSAVE support. OSPKE only gets
	 * set if we enable XSAVE and we enable PKU in XCR0.
	 */
	if (!boot_cpu_has(X86_FEATURE_OSPKE))
		return -EINVAL;

	/*
	 * This code should only be called with valid 'pkey'
	 * values originating from in-kernel users. Complain
	 * if a bad value is observed.
	 */
	WARN_ON_ONCE(pkey >= arch_max_pkey());

	/* Set the bits we need in PKRU: */
	if (init_val & PKEY_DISABLE_ACCESS)
		new_pkru_bits |= PKRU_AD_BIT;
	if (init_val & PKEY_DISABLE_WRITE)
		new_pkru_bits |= PKRU_WD_BIT;

	/* Shift the bits in to the correct place in PKRU for pkey: */
	new_pkru_bits <<= pkey_shift;

	/* Get old PKRU and mask off any old bits in place: */
	old_pkru = read_pkru();
	old_pkru &= ~((PKRU_AD_BIT|PKRU_WD_BIT) << pkey_shift);

	/* Write old part along with new part: */
	write_pkru(old_pkru | new_pkru_bits);

	return 0;
}
#endif /* ! CONFIG_ARCH_HAS_PKEYS */
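
/*
 * Worked example for the PKRU bit manipulation above: for pkey = 1 and
 * init_val = PKEY_DISABLE_WRITE, pkey_shift = 1 * PKRU_BITS_PER_PKEY = 2
 * and new_pkru_bits = PKRU_WD_BIT << 2 = 0x8. Only the two bits
 * belonging to pkey 1 are masked out of the old PKRU value, so the
 * access rights of all other pkeys survive the final write_pkru().
 */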
/*
 * Weird legacy quirk: SSE and YMM states store information in the
 * MXCSR and MXCSR_FLAGS fields of the FP area. That means if the FP
 * area is marked as unused in the xfeatures header, we need to copy
 * MXCSR and MXCSR_FLAGS if either SSE or YMM are in use.
 */
static inline bool xfeatures_mxcsr_quirk(u64 xfeatures)
{
	if (!(xfeatures & (XFEATURE_MASK_SSE|XFEATURE_MASK_YMM)))
		return false;

	if (xfeatures & XFEATURE_MASK_FP)
		return false;

	return true;
}
static void fill_gap(unsigned to, void **kbuf, unsigned *pos, unsigned *count)
{
	if (*pos < to) {
		unsigned size = to - *pos;

		if (size > *count)
			size = *count;
		memcpy(*kbuf, (void *)&init_fpstate.xsave + *pos, size);
		*kbuf += size;
		*pos += size;
		*count -= size;
	}
}

static void copy_part(unsigned offset, unsigned size, void *from,
		      void **kbuf, unsigned *pos, unsigned *count)
{
	fill_gap(offset, kbuf, pos, count);
	if (size > *count)
		size = *count;
	if (size) {
		memcpy(*kbuf, from, size);
		*kbuf += size;
		*pos += size;
		*count -= size;
	}
}
/*
 * Convert from kernel XSAVES compacted format to standard format and copy
 * to a kernel-space ptrace buffer.
 *
 * It supports partial copy but pos always starts from zero. This is called
 * from xstateregs_get() and there we check the CPU has XSAVES.
 */
int copy_xstate_to_kernel(void *kbuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total)
{
	struct xstate_header header;
	const unsigned off_mxcsr = offsetof(struct fxregs_state, mxcsr);
	unsigned count = size_total;
	int i;

	/*
	 * Currently copy_regset_to_user() starts from pos 0:
	 */
	if (unlikely(offset_start != 0))
		return -EFAULT;

	/*
	 * The destination is a ptrace buffer; we put in only user xstates:
	 */
	memset(&header, 0, sizeof(header));
	header.xfeatures = xsave->header.xfeatures;
	header.xfeatures &= xfeatures_mask_user();

	if (header.xfeatures & XFEATURE_MASK_FP)
		copy_part(0, off_mxcsr,
			  &xsave->i387, &kbuf, &offset_start, &count);
	if (header.xfeatures & (XFEATURE_MASK_SSE | XFEATURE_MASK_YMM))
		copy_part(off_mxcsr, MXCSR_AND_FLAGS_SIZE,
			  &xsave->i387.mxcsr, &kbuf, &offset_start, &count);
	if (header.xfeatures & XFEATURE_MASK_FP)
		copy_part(offsetof(struct fxregs_state, st_space), 128,
			  &xsave->i387.st_space, &kbuf, &offset_start, &count);
	if (header.xfeatures & XFEATURE_MASK_SSE)
		copy_part(xstate_offsets[XFEATURE_SSE], 256,
			  &xsave->i387.xmm_space, &kbuf, &offset_start, &count);
	/*
	 * Fill xsave->i387.sw_reserved value for ptrace frame:
	 */
	copy_part(offsetof(struct fxregs_state, sw_reserved), 48,
		  xstate_fx_sw_bytes, &kbuf, &offset_start, &count);
	/*
	 * Copy xregs_state->header:
	 */
	copy_part(offsetof(struct xregs_state, header), sizeof(header),
		  &header, &kbuf, &offset_start, &count);

	for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
		/*
		 * Copy only in-use xstates:
		 */
		if ((header.xfeatures >> i) & 1) {
			void *src = __raw_xsave_addr(xsave, i);

			copy_part(xstate_offsets[i], xstate_sizes[i],
				  src, &kbuf, &offset_start, &count);
		}
	}
	fill_gap(size_total, &kbuf, &offset_start, &count);

	return 0;
}
static inline int
__copy_xstate_to_user(void __user *ubuf, const void *data, unsigned int offset, unsigned int size, unsigned int size_total)
{
	if (!size)
		return 0;

	if (offset < size_total) {
		unsigned int copy = min(size, size_total - offset);

		if (__copy_to_user(ubuf + offset, data, copy))
			return -EFAULT;
	}
	return 0;
}
/*
 * Convert from kernel XSAVES compacted format to standard format and copy
 * to a user-space buffer. It supports partial copy but pos always starts from
 * zero. This is called from xstateregs_get() and there we check the CPU
 * has XSAVES.
 */
int copy_xstate_to_user(void __user *ubuf, struct xregs_state *xsave, unsigned int offset_start, unsigned int size_total)
{
	unsigned int offset, size;
	int ret, i;
	struct xstate_header header;

	/*
	 * Currently copy_regset_to_user() starts from pos 0:
	 */
	if (unlikely(offset_start != 0))
		return -EFAULT;

	/*
	 * The destination is a ptrace buffer; we put in only user xstates:
	 */
	memset(&header, 0, sizeof(header));
	header.xfeatures = xsave->header.xfeatures;
	header.xfeatures &= xfeatures_mask_user();

	/*
	 * Copy xregs_state->header:
	 */
	offset = offsetof(struct xregs_state, header);
	size = sizeof(header);

	ret = __copy_xstate_to_user(ubuf, &header, offset, size, size_total);
	if (ret)
		return ret;

	for (i = 0; i < XFEATURE_MAX; i++) {
		/*
		 * Copy only in-use xstates:
		 */
		if ((header.xfeatures >> i) & 1) {
			void *src = __raw_xsave_addr(xsave, i);

			offset = xstate_offsets[i];
			size = xstate_sizes[i];

			/* The next component has to fit fully into the output buffer: */
			if (offset + size > size_total)
				break;

			ret = __copy_xstate_to_user(ubuf, src, offset, size, size_total);
			if (ret)
				return ret;
		}
	}

	if (xfeatures_mxcsr_quirk(header.xfeatures)) {
		offset = offsetof(struct fxregs_state, mxcsr);
		size = MXCSR_AND_FLAGS_SIZE;
		__copy_xstate_to_user(ubuf, &xsave->i387.mxcsr, offset, size, size_total);
	}

	/*
	 * Fill xsave->i387.sw_reserved value for ptrace frame:
	 */
	offset = offsetof(struct fxregs_state, sw_reserved);
	size = sizeof(xstate_fx_sw_bytes);

	ret = __copy_xstate_to_user(ubuf, xstate_fx_sw_bytes, offset, size, size_total);
	if (ret)
		return ret;

	return 0;
}
/*
 * Convert from a ptrace standard-format kernel buffer to kernel XSAVES format
 * and copy to the target thread. This is called from xstateregs_set().
 */
int copy_kernel_to_xstate(struct xregs_state *xsave, const void *kbuf)
{
	unsigned int offset, size;
	int i;
	struct xstate_header hdr;

	offset = offsetof(struct xregs_state, header);
	size = sizeof(hdr);

	memcpy(&hdr, kbuf + offset, size);

	if (validate_user_xstate_header(&hdr))
		return -EINVAL;

	for (i = 0; i < XFEATURE_MAX; i++) {
		u64 mask = ((u64)1 << i);

		if (hdr.xfeatures & mask) {
			void *dst = __raw_xsave_addr(xsave, i);

			offset = xstate_offsets[i];
			size = xstate_sizes[i];

			memcpy(dst, kbuf + offset, size);
		}
	}

	if (xfeatures_mxcsr_quirk(hdr.xfeatures)) {
		offset = offsetof(struct fxregs_state, mxcsr);
		size = MXCSR_AND_FLAGS_SIZE;
		memcpy(&xsave->i387.mxcsr, kbuf + offset, size);
	}

	/*
	 * The state that came in from userspace was user-state only.
	 * Mask all the user states out of 'xfeatures':
	 */
	xsave->header.xfeatures &= XFEATURE_MASK_SUPERVISOR_ALL;

	/*
	 * Add back in the features that came in from userspace:
	 */
	xsave->header.xfeatures |= hdr.xfeatures;

	return 0;
}
/*
 * Convert from a ptrace or sigreturn standard-format user-space buffer to
 * kernel XSAVES format and copy to the target thread. This is called from
 * xstateregs_set(), as well as potentially from the sigreturn() and
 * rt_sigreturn() system calls.
 */
int copy_user_to_xstate(struct xregs_state *xsave, const void __user *ubuf)
{
	unsigned int offset, size;
	int i;
	struct xstate_header hdr;

	offset = offsetof(struct xregs_state, header);
	size = sizeof(hdr);

	if (__copy_from_user(&hdr, ubuf + offset, size))
		return -EFAULT;

	if (validate_user_xstate_header(&hdr))
		return -EINVAL;

	for (i = 0; i < XFEATURE_MAX; i++) {
		u64 mask = ((u64)1 << i);

		if (hdr.xfeatures & mask) {
			void *dst = __raw_xsave_addr(xsave, i);

			offset = xstate_offsets[i];
			size = xstate_sizes[i];

			if (__copy_from_user(dst, ubuf + offset, size))
				return -EFAULT;
		}
	}

	if (xfeatures_mxcsr_quirk(hdr.xfeatures)) {
		offset = offsetof(struct fxregs_state, mxcsr);
		size = MXCSR_AND_FLAGS_SIZE;
		if (__copy_from_user(&xsave->i387.mxcsr, ubuf + offset, size))
			return -EFAULT;
	}

	/*
	 * The state that came in from userspace was user-state only.
	 * Mask all the user states out of 'xfeatures':
	 */
	xsave->header.xfeatures &= XFEATURE_MASK_SUPERVISOR_ALL;

	/*
	 * Add back in the features that came in from userspace:
	 */
	xsave->header.xfeatures |= hdr.xfeatures;

	return 0;
}
/*
 * Save only supervisor states to the kernel buffer. This blows away all
 * old states, and is intended to be used only in __fpu__restore_sig(), where
 * user states are restored from the user buffer.
 */
void copy_supervisor_to_kernel(struct xregs_state *xstate)
{
	struct xstate_header *header;
	u64 max_bit, min_bit;
	u32 lmask, hmask;
	int err, i;

	if (WARN_ON(!boot_cpu_has(X86_FEATURE_XSAVES)))
		return;

	if (!xfeatures_mask_supervisor())
		return;

	max_bit = __fls(xfeatures_mask_supervisor());
	min_bit = __ffs(xfeatures_mask_supervisor());

	lmask = xfeatures_mask_supervisor();
	hmask = xfeatures_mask_supervisor() >> 32;
	XSTATE_OP(XSAVES, xstate, lmask, hmask, err);

	/* We should never fault when copying to a kernel buffer: */
	if (WARN_ON_FPU(err))
		return;

	/*
	 * At this point, the buffer has only supervisor states and must be
	 * converted back to normal kernel format.
	 */
	header = &xstate->header;
	header->xcomp_bv |= xfeatures_mask_all;

	/*
	 * This only moves states up in the buffer. Start with
	 * the last state and move backwards so that states are
	 * not overwritten until after they are moved. Note:
	 * memmove() allows overlapping src/dst buffers.
	 */
	for (i = max_bit; i >= min_bit; i--) {
		u8 *xbuf = (u8 *)xstate;

		if (!((header->xfeatures >> i) & 1))
			continue;

		/* Move xfeature 'i' into its normal location */
		memmove(xbuf + xstate_comp_offsets[i],
			xbuf + xstate_supervisor_only_offsets[i],
			xstate_sizes[i]);
	}
}
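
/*
 * Illustrative sketch (hypothetical numbers): if the lowest-numbered
 * enabled supervisor xfeature were nr 10, the XSAVES above (RFBM
 * containing only supervisor bits) would have written it right after
 * the header, at xstate_supervisor_only_offsets[10] = 576, while its
 * normal location behind all enabled user states,
 * xstate_comp_offsets[10], is larger - say 2688. The memmove() above
 * then shifts it up from 576 to 2688.
 */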
#ifdef CONFIG_PROC_PID_ARCH_STATUS
/*
 * Report the amount of time elapsed in milliseconds since last AVX512
 * use in the task.
 */
static void avx512_status(struct seq_file *m, struct task_struct *task)
{
	unsigned long timestamp = READ_ONCE(task->thread.fpu.avx512_timestamp);
	long delta;

	if (!timestamp) {
		/*
		 * Report -1 if no AVX512 usage
		 */
		delta = -1;
	} else {
		delta = (long)(jiffies - timestamp);
		/*
		 * Cap to LONG_MAX if time difference > LONG_MAX
		 */
		if (delta < 0)
			delta = LONG_MAX;
		delta = jiffies_to_msecs(delta);
	}

	seq_put_decimal_ll(m, "AVX512_elapsed_ms:\t", delta);
	seq_putc(m, '\n');
}
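
/*
 * Example output, as seen in /proc/<pid>/arch_status (the value is in
 * milliseconds since the task last used AVX-512, or -1 if it never has):
 *
 *	AVX512_elapsed_ms:      8
 */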
/*
 * Report architecture specific information
 */
int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns,
			struct pid *pid, struct task_struct *task)
{
	/*
	 * Report AVX512 state if the processor and build option support it.
	 */
	if (cpu_feature_enabled(X86_FEATURE_AVX512F))
		avx512_status(m, task);

	return 0;
}
#endif /* CONFIG_PROC_PID_ARCH_STATUS */