// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  Cyrix stuff, June 1998 by:
 *	- Rafael R. Reilova (moved everything from head.S),
 *	  <rreilova@ececs.uc.edu>
 *	- Channing Corn (tests & fixes),
 *	- Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>

static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);

/*
 * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
 * writes to SPEC_CTRL contain whatever reserved bits have been set.
 */
u64 __ro_after_init x86_spec_ctrl_base;

/*
 * The vendor and possibly platform specific bits which can be modified in
 * x86_spec_ctrl_base.
 */
static u64 __ro_after_init x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS;

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

void __init check_bugs(void)
{
	identify_boot_cpu();

	if (!IS_ENABLED(CONFIG_SMP)) {
		pr_info("CPU: ");
		print_cpu_info(&boot_cpu_data);
	}

	/*
	 * Read the SPEC_CTRL MSR to account for reserved bits which may
	 * have unknown values. The AMD64_LS_CFG MSR is cached in the early
	 * AMD init code as it is not enumerated and depends on the family.
	 */
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	/* Select the proper Spectre mitigation before patching alternatives */
	spectre_v2_select_mitigation();

	/*
	 * Select the proper mitigation for any exposure to the Speculative
	 * Store Bypass vulnerability.
	 */
	ssb_select_mitigation();

#ifdef CONFIG_X86_32
	/*
	 * Check whether we are able to run this kernel safely on SMP.
	 *
	 * - i386 is no longer supported.
	 * - In order to run on anything without a TSC, we need to be
	 *   compiled for a i486.
	 */
	if (boot_cpu_data.x86 < 4)
		panic("Kernel requires i486+ for 'invlpg' and other features");

	init_utsname()->machine[1] =
		'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
	alternative_instructions();

	fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
	alternative_instructions();

	/*
	 * Make sure the first 2MB area is not mapped by huge pages.
	 * There are typically fixed size MTRRs in there and overlapping
	 * MTRRs into large pages can cause slowdowns.
	 *
	 * Right now we don't do that with gbpages because there seems
	 * to be very little benefit for that case.
	 */
	if (!direct_gbpages)
		set_memory_4k((unsigned long)__va(0), 1);
#endif
}

/* The kernel command line selection */
enum spectre_v2_mitigation_cmd {
	SPECTRE_V2_CMD_NONE,
	SPECTRE_V2_CMD_AUTO,
	SPECTRE_V2_CMD_FORCE,
	SPECTRE_V2_CMD_RETPOLINE,
	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
	SPECTRE_V2_CMD_RETPOLINE_AMD,
};

static const char *spectre_v2_strings[] = {
	[SPECTRE_V2_NONE]			= "Vulnerable",
	[SPECTRE_V2_RETPOLINE_MINIMAL]		= "Vulnerable: Minimal generic ASM retpoline",
	[SPECTRE_V2_RETPOLINE_MINIMAL_AMD]	= "Vulnerable: Minimal AMD ASM retpoline",
	[SPECTRE_V2_RETPOLINE_GENERIC]		= "Mitigation: Full generic retpoline",
	[SPECTRE_V2_RETPOLINE_AMD]		= "Mitigation: Full AMD retpoline",
};

#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
	SPECTRE_V2_NONE;

void x86_spec_ctrl_set(u64 val)
{
	if (val & x86_spec_ctrl_mask)
		WARN_ONCE(1, "SPEC_CTRL MSR value 0x%16llx is unknown.\n", val);
	else
		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | val);
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);

u64 x86_spec_ctrl_get_default(void)
{
	u64 msrval = x86_spec_ctrl_base;

	if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
		msrval |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
	return msrval;
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
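
/*
 * x86_spec_ctrl_set_guest() and x86_spec_ctrl_restore_host() bracket a
 * VM entry/exit: the former loads the guest's SPEC_CTRL view before
 * entering the guest, the latter restores the host's view afterwards.
 * Both skip the expensive MSR write when the host and guest values
 * already match. A sketch of the intended caller sequence in a
 * hypervisor run loop (illustrative only; "vcpu_spec_ctrl" is a
 * hypothetical per-vCPU shadow of MSR_IA32_SPEC_CTRL, not a symbol
 * defined here):
 *
 *	x86_spec_ctrl_set_guest(vcpu_spec_ctrl);
 *	... enter and run the guest ...
 *	x86_spec_ctrl_restore_host(vcpu_spec_ctrl);
 */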
void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
{
	u64 host = x86_spec_ctrl_base;

	/* Is MSR_SPEC_CTRL implemented ? */
	if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		return;

	/* Intel controls SSB in MSR_SPEC_CTRL */
	if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
		host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);

	if (host != guest_spec_ctrl)
		wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);

void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
{
	u64 host = x86_spec_ctrl_base;

	/* Is MSR_SPEC_CTRL implemented ? */
	if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		return;

	/* Intel controls SSB in MSR_SPEC_CTRL */
	if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
		host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);

	if (host != guest_spec_ctrl)
		wrmsrl(MSR_IA32_SPEC_CTRL, host);
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
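
/*
 * Engage SSBD through the AMD LS_CFG MSR. x86_amd_ls_cfg_ssbd_mask is
 * family dependent (a different LS_CFG bit per family) and is computed
 * by the early AMD init code; here we only OR it into the cached base
 * value and write the MSR back.
 */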
static void x86_amd_ssb_disable(void)
{
	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

	if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
		wrmsrl(MSR_AMD64_LS_CFG, msrval);
}
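
/*
 * Hook for the module loader: has_retpoline reports whether a module
 * being loaded was itself built with retpoline support, and the system
 * is flagged as potentially vulnerable if it was not. (Descriptive
 * note: the call site lives in the generic module loading code.)
 */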
#ifdef RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
	if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
		return true;

	pr_err("System may be vulnerable to spectre v2\n");
	spectre_v2_bad_module = true;
	return false;
}

static inline const char *spectre_v2_module_string(void)
{
	return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

static void __init spec2_print_if_insecure(const char *reason)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		pr_info("%s selected on command line.\n", reason);
}

static void __init spec2_print_if_secure(const char *reason)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		pr_info("%s selected on command line.\n", reason);
}

static inline bool retp_compiler(void)
{
	return __is_defined(RETPOLINE);
}

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}
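
/*
 * match_option() above requires an exact, full-length match because
 * arg is not NUL-delimited at arglen. For example,
 * match_option("retpoline,amd", 13, "retpoline") is false (length
 * mismatch), while match_option("retpoline", 9, "retpoline") is true.
 */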

static const struct {
	const char *option;
	enum spectre_v2_mitigation_cmd cmd;
	bool secure;
} mitigation_options[] = {
	{ "off",               SPECTRE_V2_CMD_NONE,              false },
	{ "on",                SPECTRE_V2_CMD_FORCE,             true  },
	{ "retpoline",         SPECTRE_V2_CMD_RETPOLINE,         false },
	{ "retpoline,amd",     SPECTRE_V2_CMD_RETPOLINE_AMD,     false },
	{ "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
	{ "auto",              SPECTRE_V2_CMD_AUTO,              false },
};

static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
	enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
		return SPECTRE_V2_CMD_NONE;

	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
		if (!match_option(arg, ret, mitigation_options[i].option))
			continue;
		cmd = mitigation_options[i].cmd;
		break;
	}

	if (i >= ARRAY_SIZE(mitigation_options)) {
		pr_err("unknown option (%s). Switching to AUTO select\n", arg);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
	    !IS_ENABLED(CONFIG_RETPOLINE)) {
		pr_err("%s selected but not compiled in. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
		pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
		return SPECTRE_V2_CMD_AUTO;
	}

	if (mitigation_options[i].secure)
		spec2_print_if_secure(mitigation_options[i].option);
	else
		spec2_print_if_insecure(mitigation_options[i].option);

	return cmd;
}

/* Check for Skylake-like CPUs (for RSB handling) */
static bool __init is_skylake_era(void)
{
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6) {
		switch (boot_cpu_data.x86_model) {
		case INTEL_FAM6_SKYLAKE_MOBILE:
		case INTEL_FAM6_SKYLAKE_DESKTOP:
		case INTEL_FAM6_SKYLAKE_X:
		case INTEL_FAM6_KABYLAKE_MOBILE:
		case INTEL_FAM6_KABYLAKE_DESKTOP:
			return true;
		}
	}
	return false;
}

static void __init spectre_v2_select_mitigation(void)
{
	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
	enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

	/*
	 * If the CPU is not affected and the command line mode is NONE or
	 * AUTO then there is nothing to do.
	 */
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
	    (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
		return;

	switch (cmd) {
	case SPECTRE_V2_CMD_NONE:
		return;

	case SPECTRE_V2_CMD_FORCE:
	case SPECTRE_V2_CMD_AUTO:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_auto;
		break;
	case SPECTRE_V2_CMD_RETPOLINE_AMD:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_amd;
		break;
	case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_generic;
		break;
	case SPECTRE_V2_CMD_RETPOLINE:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_auto;
		break;
	}
	pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
	return;

retpoline_auto:
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
	retpoline_amd:
		if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
			pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
			goto retpoline_generic;
		}
		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
					 SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
	} else {
	retpoline_generic:
		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
					 SPECTRE_V2_RETPOLINE_MINIMAL;
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
	}

	spectre_v2_enabled = mode;
	pr_info("%s\n", spectre_v2_strings[mode]);

	/*
	 * If neither SMEP nor PTI are available, there is a risk of
	 * hitting userspace addresses in the RSB after a context switch
	 * from a shallow call stack to a deeper one. To prevent this, fill
	 * the entire RSB, even when using IBRS.
	 *
	 * Skylake era CPUs have a separate issue with *underflow* of the
	 * RSB, when they will predict 'ret' targets from the generic BTB.
	 * The proper mitigation for this is IBRS. If IBRS is not supported
	 * or is deactivated in favour of retpolines, the RSB fill on
	 * context switch is required.
	 */
	if ((!boot_cpu_has(X86_FEATURE_PTI) &&
	     !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
		setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
		pr_info("Spectre v2 mitigation: Filling RSB on context switch\n");
	}

	/* Initialize Indirect Branch Prediction Barrier if supported */
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
		pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
	}

	/*
	 * Retpoline means the kernel is safe because it has no indirect
	 * branches. But the firmware isn't, so use IBRS to protect
	 * firmware calls.
	 */
	if (boot_cpu_has(X86_FEATURE_IBRS)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
		pr_info("Enabling Restricted Speculation for firmware calls\n");
	}
}

#undef pr_fmt
#define pr_fmt(fmt)	"Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
	SPEC_STORE_BYPASS_CMD_NONE,
	SPEC_STORE_BYPASS_CMD_AUTO,
	SPEC_STORE_BYPASS_CMD_ON,
	SPEC_STORE_BYPASS_CMD_PRCTL,
	SPEC_STORE_BYPASS_CMD_SECCOMP,
};

static const char *ssb_strings[] = {
	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
	[SPEC_STORE_BYPASS_SECCOMP]	= "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};

static const struct {
	const char *option;
	enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[] = {
	{ "auto",    SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
	{ "on",      SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
	{ "off",     SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
	{ "prctl",   SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
	{ "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};

static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
	enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable"))
		return SPEC_STORE_BYPASS_CMD_NONE;

	ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
				  arg, sizeof(arg));
	if (ret < 0)
		return SPEC_STORE_BYPASS_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
		if (!match_option(arg, ret, ssb_mitigation_options[i].option))
			continue;

		cmd = ssb_mitigation_options[i].cmd;
		break;
	}

	if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
		pr_err("unknown option (%s). Switching to AUTO select\n", arg);
		return SPEC_STORE_BYPASS_CMD_AUTO;
	}

	return cmd;
}

static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
	enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
	enum ssb_mitigation_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_SSBD))
		return mode;

	cmd = ssb_parse_cmdline();
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
	    (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
	     cmd == SPEC_STORE_BYPASS_CMD_AUTO))
		return mode;

	switch (cmd) {
	case SPEC_STORE_BYPASS_CMD_AUTO:
	case SPEC_STORE_BYPASS_CMD_SECCOMP:
		/*
		 * Choose prctl+seccomp as the default mode if seccomp is
		 * enabled.
		 */
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPEC_STORE_BYPASS_SECCOMP;
		else
			mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_ON:
		mode = SPEC_STORE_BYPASS_DISABLE;
		break;
	case SPEC_STORE_BYPASS_CMD_PRCTL:
		mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_NONE:
		break;
	}

	/*
	 * We have three CPU feature flags that are in play here:
	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
	 *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
	 *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
	 */
	if (mode == SPEC_STORE_BYPASS_DISABLE) {
		setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
		/*
		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
		 * a completely different MSR and bit dependent on family.
		 */
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_INTEL:
			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
			x86_spec_ctrl_mask &= ~SPEC_CTRL_SSBD;
			x86_spec_ctrl_set(SPEC_CTRL_SSBD);
			break;
		case X86_VENDOR_AMD:
			x86_amd_ssb_disable();
			break;
		}
	}

	return mode;
}

static void ssb_select_mitigation(void)
{
	ssb_mode = __ssb_select_mitigation();

	if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		pr_info("%s\n", ssb_strings[ssb_mode]);
}

#undef pr_fmt
#define pr_fmt(fmt)	"Speculation prctl: " fmt

static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	bool update;

	if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
	    ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
		return -ENXIO;

	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* If speculation is force disabled, enable is not allowed */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_clear_spec_ssb_disable(task);
		update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
		break;
	case PR_SPEC_DISABLE:
		task_set_spec_ssb_disable(task);
		update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
		break;
	case PR_SPEC_FORCE_DISABLE:
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_force_disable(task);
		update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
		break;
	default:
		return -ERANGE;
	}

	/*
	 * If being set on a non-current task, delay setting the CPU
	 * mitigation until it is next scheduled. The MSR is only updated
	 * here for the current task, and only when the TIF_SSBD flag
	 * actually changed.
	 */
	if (task == current && update)
		speculative_store_bypass_update();

	return 0;
}
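
/*
 * The per-task interface is the PR_SET_SPECULATION_CTRL /
 * PR_GET_SPECULATION_CTRL prctl() pair. A sketch of the intended
 * userspace usage (illustrative only):
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *	      PR_SPEC_DISABLE, 0, 0);
 *
 * disables Speculative Store Bypass for the calling task, and
 *
 *	prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
 *
 * queries the current state. Those calls land in the two arch hooks
 * below.
 */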
int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}

#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif

static int ssb_prctl_get(struct task_struct *task)
{
	switch (ssb_mode) {
	case SPEC_STORE_BYPASS_DISABLE:
		return PR_SPEC_DISABLE;
	case SPEC_STORE_BYPASS_SECCOMP:
	case SPEC_STORE_BYPASS_PRCTL:
		if (task_spec_ssb_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ssb_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	default:
		if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
			return PR_SPEC_ENABLE;
		return PR_SPEC_NOT_AFFECTED;
	}
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_get(task);
	default:
		return -ENODEV;
	}
}
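
/*
 * x86_spec_ctrl_setup_ap() runs during secondary-CPU bringup so that
 * application processors inherit the SPEC_CTRL/LS_CFG settings the
 * boot CPU selected above, keeping the mitigation state consistent
 * across all CPUs.
 */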
void x86_spec_ctrl_setup_ap(void)
{
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);

	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
		x86_amd_ssb_disable();
}

#ifdef CONFIG_SYSFS

static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
			       char *buf, unsigned int bug)
{
	if (!boot_cpu_has_bug(bug))
		return sprintf(buf, "Not affected\n");

	switch (bug) {
	case X86_BUG_CPU_MELTDOWN:
		if (boot_cpu_has(X86_FEATURE_PTI))
			return sprintf(buf, "Mitigation: PTI\n");
		break;

	case X86_BUG_SPECTRE_V1:
		return sprintf(buf, "Mitigation: __user pointer sanitization\n");

	case X86_BUG_SPECTRE_V2:
		return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
			       boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
			       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
			       spectre_v2_module_string());

	case X86_BUG_SPEC_STORE_BYPASS:
		return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

	default:
		break;
	}

	return sprintf(buf, "Vulnerable\n");
}
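
/*
 * These back the files under /sys/devices/system/cpu/vulnerabilities/.
 * For example, with a full generic retpoline plus IBPB and firmware
 * IBRS active, reading the spectre_v2 file would print something like:
 *
 *	Mitigation: Full generic retpoline, IBPB, IBRS_FW
 */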
ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}
#endif