x86/speculation: Prevent stale SPEC_CTRL msr content
arch/x86/kernel/cpu/bugs.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1994  Linus Torvalds
 *
 * Cyrix stuff, June 1998 by:
 *	- Rafael R. Reilova (moved everything from head.S),
 *	  <rreilova@ececs.uc.edu>
 *	- Channing Corn (tests & fixes),
 *	- Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/smt.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>
#include <asm/e820/api.h>
#include <asm/hypervisor.h>

static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);

/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
static DEFINE_MUTEX(spec_ctrl_mutex);

/*
 * The vendor and possibly platform specific bits which can be modified in
 * x86_spec_ctrl_base.
 */
static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

/* Control conditional STIBP in switch_to() */
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
/* Control conditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

void __init check_bugs(void)
{
	identify_boot_cpu();

	/*
	 * identify_boot_cpu() initialized SMT support information; let the
	 * core code know.
	 */
	cpu_smt_check_topology_early();

	if (!IS_ENABLED(CONFIG_SMP)) {
		pr_info("CPU: ");
		print_cpu_info(&boot_cpu_data);
	}

	/*
	 * Read the SPEC_CTRL MSR to account for reserved bits which may
	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
	 * init code as it is not enumerated and depends on the family.
	 */
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	/* Allow STIBP in MSR_SPEC_CTRL if supported */
	if (boot_cpu_has(X86_FEATURE_STIBP))
		x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;

	/* Select the proper spectre mitigation before patching alternatives */
	spectre_v2_select_mitigation();

	/*
	 * Select proper mitigation for any exposure to the Speculative Store
	 * Bypass vulnerability.
	 */
	ssb_select_mitigation();

	l1tf_select_mitigation();

#ifdef CONFIG_X86_32
	/*
	 * Check whether we are able to run this kernel safely on SMP.
	 *
	 * - i386 is no longer supported.
	 * - In order to run on anything without a TSC, we need to be
	 *   compiled for an i486.
	 */
	if (boot_cpu_data.x86 < 4)
		panic("Kernel requires i486+ for 'invlpg' and other features");

	init_utsname()->machine[1] =
		'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
	alternative_instructions();

	fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
	alternative_instructions();

	/*
	 * Make sure the first 2MB area is not mapped by huge pages.
	 * There are typically fixed-size MTRRs in there and overlapping
	 * MTRRs into large pages causes slowdowns.
	 *
	 * Right now we don't do that with gbpages because there seems to be
	 * very little benefit for that case.
	 */
	if (!direct_gbpages)
		set_memory_4k((unsigned long)__va(0), 1);
#endif
}

void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
	u64 msrval, guestval, hostval = x86_spec_ctrl_base;
	struct thread_info *ti = current_thread_info();

	/* Is MSR_SPEC_CTRL implemented? */
	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
		/*
		 * Restrict guest_spec_ctrl to supported values. Clear the
		 * modifiable bits in the host base value and OR in the
		 * modifiable bits from the guest value.
		 */
		guestval = hostval & ~x86_spec_ctrl_mask;
		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;

		/* SSBD controlled in MSR_SPEC_CTRL */
		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
		    static_cpu_has(X86_FEATURE_AMD_SSBD))
			hostval |= ssbd_tif_to_spec_ctrl(ti->flags);

		/* Conditional STIBP enabled? */
		if (static_branch_unlikely(&switch_to_cond_stibp))
			hostval |= stibp_tif_to_spec_ctrl(ti->flags);

		if (hostval != guestval) {
			msrval = setguest ? guestval : hostval;
			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
		}
	}

	/*
	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
	 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
	 */
	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
		return;

	/*
	 * If the host has SSBD mitigation enabled, force it in the host's
	 * virtual MSR value. If it's not permanently enabled, evaluate
	 * current's TIF_SSBD thread flag.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
		hostval = SPEC_CTRL_SSBD;
	else
		hostval = ssbd_tif_to_spec_ctrl(ti->flags);

	/* Sanitize the guest value */
	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

	if (hostval != guestval) {
		unsigned long tif;

		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
				 ssbd_spec_ctrl_to_tif(hostval);

		speculation_ctrl_update(tif);
	}
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);

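/*
 * Engage the AMD specific Speculative Store Bypass disable: use the
 * virtualized SPEC_CTRL interface when it is available, otherwise set the
 * SSBD bit in the LS_CFG MSR directly.
 */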
static void x86_amd_ssb_disable(void)
{
	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
		wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
	SPECTRE_V2_NONE;

static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
	SPECTRE_V2_USER_NONE;

#ifdef RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
	if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
		return true;

	pr_err("System may be vulnerable to spectre v2\n");
	spectre_v2_bad_module = true;
	return false;
}

static inline const char *spectre_v2_module_string(void)
{
	return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

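/*
 * Compare a command line argument of length arglen (as returned by
 * cmdline_find_option()) against the NUL-terminated string opt.
 */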
static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}

/* The kernel command line selection for spectre v2 */
enum spectre_v2_mitigation_cmd {
	SPECTRE_V2_CMD_NONE,
	SPECTRE_V2_CMD_AUTO,
	SPECTRE_V2_CMD_FORCE,
	SPECTRE_V2_CMD_RETPOLINE,
	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
	SPECTRE_V2_CMD_RETPOLINE_AMD,
};

enum spectre_v2_user_cmd {
	SPECTRE_V2_USER_CMD_NONE,
	SPECTRE_V2_USER_CMD_AUTO,
	SPECTRE_V2_USER_CMD_FORCE,
};

static const char * const spectre_v2_user_strings[] = {
	[SPECTRE_V2_USER_NONE]		= "User space: Vulnerable",
	[SPECTRE_V2_USER_STRICT]	= "User space: Mitigation: STIBP protection",
};

static const struct {
	const char			*option;
	enum spectre_v2_user_cmd	cmd;
	bool				secure;
} v2_user_options[] __initdata = {
	{ "auto",	SPECTRE_V2_USER_CMD_AUTO,	false },
	{ "off",	SPECTRE_V2_USER_CMD_NONE,	false },
	{ "on",		SPECTRE_V2_USER_CMD_FORCE,	true  },
};

static void __init spec_v2_user_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("spectre_v2_user=%s forced on command line.\n", reason);
}

static enum spectre_v2_user_cmd __init
spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
{
	char arg[20];
	int ret, i;

	switch (v2_cmd) {
	case SPECTRE_V2_CMD_NONE:
		return SPECTRE_V2_USER_CMD_NONE;
	case SPECTRE_V2_CMD_FORCE:
		return SPECTRE_V2_USER_CMD_FORCE;
	default:
		break;
	}

	ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
				  arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_USER_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
		if (match_option(arg, ret, v2_user_options[i].option)) {
			spec_v2_user_print_cond(v2_user_options[i].option,
						v2_user_options[i].secure);
			return v2_user_options[i].cmd;
		}
	}

	pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
	return SPECTRE_V2_USER_CMD_AUTO;
}

static void __init
spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
{
	enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
	bool smt_possible = IS_ENABLED(CONFIG_SMP);

	if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
		return;

	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		smt_possible = false;

	switch (spectre_v2_parse_user_cmdline(v2_cmd)) {
	case SPECTRE_V2_USER_CMD_AUTO:
	case SPECTRE_V2_USER_CMD_NONE:
		goto set_mode;
	case SPECTRE_V2_USER_CMD_FORCE:
		mode = SPECTRE_V2_USER_STRICT;
		break;
	}

	/* Initialize Indirect Branch Prediction Barrier */
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);

		switch (mode) {
		case SPECTRE_V2_USER_STRICT:
			static_branch_enable(&switch_mm_always_ibpb);
			break;
		default:
			break;
		}

		pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
			mode == SPECTRE_V2_USER_STRICT ? "always-on" : "conditional");
	}

	/* If enhanced IBRS is enabled, no STIBP is required */
	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
		return;

set_mode:
	spectre_v2_user = mode;
	/* Only print the STIBP mode when SMT is possible */
	if (smt_possible)
		pr_info("%s\n", spectre_v2_user_strings[mode]);
}

static const char * const spectre_v2_strings[] = {
	[SPECTRE_V2_NONE]			= "Vulnerable",
	[SPECTRE_V2_RETPOLINE_GENERIC]		= "Mitigation: Full generic retpoline",
	[SPECTRE_V2_RETPOLINE_AMD]		= "Mitigation: Full AMD retpoline",
	[SPECTRE_V2_IBRS_ENHANCED]		= "Mitigation: Enhanced IBRS",
};

static const struct {
	const char *option;
	enum spectre_v2_mitigation_cmd cmd;
	bool secure;
} mitigation_options[] __initdata = {
	{ "off",		SPECTRE_V2_CMD_NONE,		  false },
	{ "on",			SPECTRE_V2_CMD_FORCE,		  true  },
	{ "retpoline",		SPECTRE_V2_CMD_RETPOLINE,	  false },
	{ "retpoline,amd",	SPECTRE_V2_CMD_RETPOLINE_AMD,	  false },
	{ "retpoline,generic",	SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
	{ "auto",		SPECTRE_V2_CMD_AUTO,		  false },
};

static void __init spec_v2_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("%s selected on command line.\n", reason);
}

static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
	enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
		return SPECTRE_V2_CMD_NONE;

	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
		if (!match_option(arg, ret, mitigation_options[i].option))
			continue;
		cmd = mitigation_options[i].cmd;
		break;
	}

	if (i >= ARRAY_SIZE(mitigation_options)) {
		pr_err("unknown option (%s). Switching to AUTO select\n", arg);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
	    !IS_ENABLED(CONFIG_RETPOLINE)) {
		pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
		pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
		return SPECTRE_V2_CMD_AUTO;
	}

	spec_v2_print_cond(mitigation_options[i].option,
			   mitigation_options[i].secure);
	return cmd;
}

static void __init spectre_v2_select_mitigation(void)
{
	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
	enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

	/*
	 * If the CPU is not affected and the command line mode is NONE or AUTO
	 * then there is nothing to do.
	 */
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
	    (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
		return;

	switch (cmd) {
	case SPECTRE_V2_CMD_NONE:
		return;

	case SPECTRE_V2_CMD_FORCE:
	case SPECTRE_V2_CMD_AUTO:
		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
			mode = SPECTRE_V2_IBRS_ENHANCED;
			/* Force it so VMEXIT will restore correctly */
			x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
			goto specv2_set_mode;
		}
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_auto;
		break;
	case SPECTRE_V2_CMD_RETPOLINE_AMD:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_amd;
		break;
	case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_generic;
		break;
	case SPECTRE_V2_CMD_RETPOLINE:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_auto;
		break;
	}
	pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
	return;

retpoline_auto:
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
	retpoline_amd:
		if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
			pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
			goto retpoline_generic;
		}
		mode = SPECTRE_V2_RETPOLINE_AMD;
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
	} else {
	retpoline_generic:
		mode = SPECTRE_V2_RETPOLINE_GENERIC;
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
	}

specv2_set_mode:
	spectre_v2_enabled = mode;
	pr_info("%s\n", spectre_v2_strings[mode]);

	/*
	 * If spectre v2 protection has been enabled, unconditionally fill
	 * RSB during a context switch; this protects against two independent
	 * issues:
	 *
	 *	- RSB underflow (and switch to BTB) on Skylake+
	 *	- SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
	 */
	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");

	/*
	 * Retpoline means the kernel is safe because it has no indirect
	 * branches. Enhanced IBRS protects firmware too, so enable restricted
	 * speculation around firmware calls only when Enhanced IBRS isn't
	 * supported.
	 *
	 * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
	 * the user might select retpoline on the kernel command line and, if
	 * the CPU supports Enhanced IBRS, the kernel might unintentionally not
	 * enable IBRS around firmware calls.
	 */
	if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
		pr_info("Enabling Restricted Speculation for firmware calls\n");
	}

	/* Set up IBPB and STIBP depending on the general spectre V2 command */
	spectre_v2_user_select_mitigation(cmd);

	/* Enable STIBP if appropriate */
	arch_smt_update();
}

static bool stibp_needed(void)
{
	/* Enhanced IBRS makes using STIBP unnecessary. */
	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
		return false;

	/* Check for strict user mitigation mode */
	return spectre_v2_user == SPECTRE_V2_USER_STRICT;
}

static void update_stibp_msr(void *info)
{
	wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
}

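/*
 * Re-evaluate whether STIBP needs to be set in x86_spec_ctrl_base for the
 * current SMT state and, if the value changed, propagate it to all online
 * CPUs.
 */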
void arch_smt_update(void)
{
	u64 mask;

	if (!stibp_needed())
		return;

	mutex_lock(&spec_ctrl_mutex);

	mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
	if (sched_smt_active())
		mask |= SPEC_CTRL_STIBP;

	if (mask != x86_spec_ctrl_base) {
		pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n",
			mask & SPEC_CTRL_STIBP ? "Enabling" : "Disabling");
		x86_spec_ctrl_base = mask;
		on_each_cpu(update_stibp_msr, NULL, 1);
	}
	mutex_unlock(&spec_ctrl_mutex);
}

#undef pr_fmt
#define pr_fmt(fmt)	"Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
	SPEC_STORE_BYPASS_CMD_NONE,
	SPEC_STORE_BYPASS_CMD_AUTO,
	SPEC_STORE_BYPASS_CMD_ON,
	SPEC_STORE_BYPASS_CMD_PRCTL,
	SPEC_STORE_BYPASS_CMD_SECCOMP,
};

static const char * const ssb_strings[] = {
	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
	[SPEC_STORE_BYPASS_SECCOMP]	= "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};

static const struct {
	const char *option;
	enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[] __initdata = {
	{ "auto",	SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
	{ "on",		SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
	{ "prctl",	SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
	{ "seccomp",	SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};

static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
	enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
		return SPEC_STORE_BYPASS_CMD_NONE;
	} else {
		ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
					  arg, sizeof(arg));
		if (ret < 0)
			return SPEC_STORE_BYPASS_CMD_AUTO;

		for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
			if (!match_option(arg, ret, ssb_mitigation_options[i].option))
				continue;

			cmd = ssb_mitigation_options[i].cmd;
			break;
		}

		if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
			return SPEC_STORE_BYPASS_CMD_AUTO;
		}
	}

	return cmd;
}

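/*
 * Pick the Speculative Store Bypass mitigation mode from the command line
 * selection and the CPU capabilities, and engage it.
 */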
static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
	enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
	enum ssb_mitigation_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_SSBD))
		return mode;

	cmd = ssb_parse_cmdline();
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
	    (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
	     cmd == SPEC_STORE_BYPASS_CMD_AUTO))
		return mode;

	switch (cmd) {
	case SPEC_STORE_BYPASS_CMD_AUTO:
	case SPEC_STORE_BYPASS_CMD_SECCOMP:
		/*
		 * Choose prctl+seccomp as the default mode if seccomp is
		 * enabled.
		 */
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPEC_STORE_BYPASS_SECCOMP;
		else
			mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_ON:
		mode = SPEC_STORE_BYPASS_DISABLE;
		break;
	case SPEC_STORE_BYPASS_CMD_PRCTL:
		mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_NONE:
		break;
	}

	/*
	 * We have three CPU feature flags that are in play here:
	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
	 *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
	 *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
	 */
	if (mode == SPEC_STORE_BYPASS_DISABLE) {
		setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
		/*
		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
		 * use a completely different MSR and bit dependent on family.
		 */
		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
		    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
			x86_amd_ssb_disable();
		} else {
			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
			x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
		}
	}

	return mode;
}

static void ssb_select_mitigation(void)
{
	ssb_mode = __ssb_select_mitigation();

	if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		pr_info("%s\n", ssb_strings[ssb_mode]);
}

#undef pr_fmt
#define pr_fmt(fmt)	"Speculation prctl: " fmt

static void task_update_spec_tif(struct task_struct *tsk)
{
	/* Force the update of the real TIF bits */
	set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);

	/*
	 * Immediately update the speculation control MSRs for the current
	 * task, but for a non-current task delay setting the CPU
	 * mitigation until it is scheduled next.
	 *
	 * This can only happen for SECCOMP mitigation. For PRCTL it's
	 * always the current task.
	 */
	if (tsk == current)
		speculation_ctrl_update_current();
}

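/*
 * Apply a PR_SET_SPECULATION_CTRL request for PR_SPEC_STORE_BYPASS to the
 * given task.
 */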
static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
	    ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
		return -ENXIO;

	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* If speculation is force disabled, enable is not allowed */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_clear_spec_ssb_disable(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
		task_set_spec_ssb_disable(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_force_disable(task);
		task_update_spec_tif(task);
		break;
	default:
		return -ERANGE;
	}
	return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}

#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif

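/*
 * Report the Speculative Store Bypass state of the given task for the
 * speculation control prctl.
 */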
static int ssb_prctl_get(struct task_struct *task)
{
	switch (ssb_mode) {
	case SPEC_STORE_BYPASS_DISABLE:
		return PR_SPEC_DISABLE;
	case SPEC_STORE_BYPASS_SECCOMP:
	case SPEC_STORE_BYPASS_PRCTL:
		if (task_spec_ssb_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ssb_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	default:
		if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
			return PR_SPEC_ENABLE;
		return PR_SPEC_NOT_AFFECTED;
	}
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_get(task);
	default:
		return -ENODEV;
	}
}

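/*
 * Sync the boot CPU's SPEC_CTRL base value and, when the SSB mitigation is
 * enabled, the AMD SSBD state to a CPU that is being set up.
 */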
void x86_spec_ctrl_setup_ap(void)
{
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
		x86_amd_ssb_disable();
}

#undef pr_fmt
#define pr_fmt(fmt)	"L1TF: " fmt

/* Default mitigation for L1TF-affected CPUs */
enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
#if IS_ENABLED(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(l1tf_mitigation);
#endif
enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);

/*
 * These CPUs all support a 44-bit physical address space internally in the
 * cache, but CPUID can report a smaller number of physical address bits.
 *
 * The L1TF mitigation uses the topmost address bit for the inversion of
 * non-present PTEs. When the installed memory reaches into the topmost
 * address bit due to memory holes, which has been observed on machines
 * that report 36 physical address bits and have 32G RAM installed, the
 * mitigation range check in l1tf_select_mitigation() triggers. This is a
 * false positive because the mitigation is still possible: the cache uses
 * 44 bits internally. Use the cache bits instead of the reported physical
 * bits and adjust them on the affected machines to 44 bits if the reported
 * bits are less than 44.
 */
static void override_cache_bits(struct cpuinfo_x86 *c)
{
	if (c->x86 != 6)
		return;

	switch (c->x86_model) {
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_SANDYBRIDGE:
	case INTEL_FAM6_IVYBRIDGE:
	case INTEL_FAM6_HASWELL_CORE:
	case INTEL_FAM6_HASWELL_ULT:
	case INTEL_FAM6_HASWELL_GT3E:
	case INTEL_FAM6_BROADWELL_CORE:
	case INTEL_FAM6_BROADWELL_GT3E:
	case INTEL_FAM6_SKYLAKE_MOBILE:
	case INTEL_FAM6_SKYLAKE_DESKTOP:
	case INTEL_FAM6_KABYLAKE_MOBILE:
	case INTEL_FAM6_KABYLAKE_DESKTOP:
		if (c->x86_cache_bits < 44)
			c->x86_cache_bits = 44;
		break;
	}
}

static void __init l1tf_select_mitigation(void)
{
	u64 half_pa;

	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return;

	override_cache_bits(&boot_cpu_data);

	switch (l1tf_mitigation) {
	case L1TF_MITIGATION_OFF:
	case L1TF_MITIGATION_FLUSH_NOWARN:
	case L1TF_MITIGATION_FLUSH:
		break;
	case L1TF_MITIGATION_FLUSH_NOSMT:
	case L1TF_MITIGATION_FULL:
		cpu_smt_disable(false);
		break;
	case L1TF_MITIGATION_FULL_FORCE:
		cpu_smt_disable(true);
		break;
	}

#if CONFIG_PGTABLE_LEVELS == 2
	pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
	return;
#endif

	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
	if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
		pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
				half_pa);
		pr_info("However, doing so will make a part of your RAM unusable.\n");
		pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n");
		return;
	}

	setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
}

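/* Parse the "l1tf=" command line option (registered via early_param() below). */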
static int __init l1tf_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (!strcmp(str, "flush,nowarn"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
	else if (!strcmp(str, "flush"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH;
	else if (!strcmp(str, "flush,nosmt"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
	else if (!strcmp(str, "full"))
		l1tf_mitigation = L1TF_MITIGATION_FULL;
	else if (!strcmp(str, "full,force"))
		l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;

	return 0;
}
early_param("l1tf", l1tf_cmdline);

#undef pr_fmt

#ifdef CONFIG_SYSFS

#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"

#if IS_ENABLED(CONFIG_KVM_INTEL)
static const char * const l1tf_vmx_states[] = {
	[VMENTER_L1D_FLUSH_AUTO]		= "auto",
	[VMENTER_L1D_FLUSH_NEVER]		= "vulnerable",
	[VMENTER_L1D_FLUSH_COND]		= "conditional cache flushes",
	[VMENTER_L1D_FLUSH_ALWAYS]		= "cache flushes",
	[VMENTER_L1D_FLUSH_EPT_DISABLED]	= "EPT disabled",
	[VMENTER_L1D_FLUSH_NOT_REQUIRED]	= "flush not necessary"
};

static ssize_t l1tf_show_state(char *buf)
{
	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
		return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);

	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
	    (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
	     sched_smt_active())) {
		return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
			       l1tf_vmx_states[l1tf_vmx_mitigation]);
	}

	return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
		       l1tf_vmx_states[l1tf_vmx_mitigation],
		       sched_smt_active() ? "vulnerable" : "disabled");
}
#else
static ssize_t l1tf_show_state(char *buf)
{
	return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
}
#endif

964{
34bce7c9
TC
965 if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
966 return "";
967
fa1202ef
TG
968 switch (spectre_v2_user) {
969 case SPECTRE_V2_USER_NONE:
970 return ", STIBP: disabled";
971 case SPECTRE_V2_USER_STRICT:
972 return ", STIBP: forced";
973 }
974 return "";
a8f76ae4
TC
975}
976
977static char *ibpb_state(void)
978{
4c71a2b6
TG
979 if (boot_cpu_has(X86_FEATURE_IBPB)) {
980 switch (spectre_v2_user) {
981 case SPECTRE_V2_USER_NONE:
982 return ", IBPB: disabled";
983 case SPECTRE_V2_USER_STRICT:
984 return ", IBPB: always-on";
985 }
986 }
987 return "";
a8f76ae4
TC
988}
989
7bb4d366 990static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
ffed645e 991 char *buf, unsigned int bug)
61dc0f55 992{
d1059518 993 if (!boot_cpu_has_bug(bug))
61dc0f55 994 return sprintf(buf, "Not affected\n");
d1059518
KRW
995
996 switch (bug) {
997 case X86_BUG_CPU_MELTDOWN:
998 if (boot_cpu_has(X86_FEATURE_PTI))
999 return sprintf(buf, "Mitigation: PTI\n");
1000
6cb2b08f
JK
1001 if (hypervisor_is_type(X86_HYPER_XEN_PV))
1002 return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");
1003
d1059518
KRW
1004 break;
1005
1006 case X86_BUG_SPECTRE_V1:
1007 return sprintf(buf, "Mitigation: __user pointer sanitization\n");
1008
1009 case X86_BUG_SPECTRE_V2:
b86bda04 1010 return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
a8f76ae4 1011 ibpb_state(),
d1059518 1012 boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
a8f76ae4 1013 stibp_state(),
bb4b3b77 1014 boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
d1059518
KRW
1015 spectre_v2_module_string());
1016
24f7fc83
KRW
1017 case X86_BUG_SPEC_STORE_BYPASS:
1018 return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
1019
17dbca11
AK
1020 case X86_BUG_L1TF:
1021 if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
72c6d2db 1022 return l1tf_show_state(buf);
17dbca11 1023 break;
d1059518
KRW
1024 default:
1025 break;
1026 }
1027
61dc0f55
TG
1028 return sprintf(buf, "Vulnerable\n");
1029}
1030
d1059518
KRW
1031ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
1032{
1033 return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
1034}
1035
21e433bd 1036ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
61dc0f55 1037{
d1059518 1038 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
61dc0f55
TG
1039}
1040
21e433bd 1041ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
61dc0f55 1042{
d1059518 1043 return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
61dc0f55 1044}
c456442c
KRW
1045
1046ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
1047{
1048 return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
1049}
17dbca11
AK
1050
1051ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
1052{
1053 return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
1054}
61dc0f55 1055#endif