arch/x86/kernel/cpu/bugs.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  Copyright (C) 1994  Linus Torvalds
4  *
5  *  Cyrix stuff, June 1998 by:
6  *      - Rafael R. Reilova (moved everything from head.S),
7  *        <rreilova@ececs.uc.edu>
8  *      - Channing Corn (tests & fixes),
9  *      - Andrew D. Balsa (code cleanup).
10  */
11 #include <linux/init.h>
12 #include <linux/cpu.h>
13 #include <linux/module.h>
14 #include <linux/nospec.h>
15 #include <linux/prctl.h>
16 #include <linux/sched/smt.h>
17 #include <linux/pgtable.h>
18 #include <linux/bpf.h>
19
20 #include <asm/spec-ctrl.h>
21 #include <asm/cmdline.h>
22 #include <asm/bugs.h>
23 #include <asm/processor.h>
24 #include <asm/processor-flags.h>
25 #include <asm/fpu/api.h>
26 #include <asm/msr.h>
27 #include <asm/vmx.h>
28 #include <asm/paravirt.h>
29 #include <asm/intel-family.h>
30 #include <asm/e820/api.h>
31 #include <asm/hypervisor.h>
32 #include <asm/tlbflush.h>
33 #include <asm/cpu.h>
34
35 #include "cpu.h"
36
37 static void __init spectre_v1_select_mitigation(void);
38 static void __init spectre_v2_select_mitigation(void);
39 static void __init retbleed_select_mitigation(void);
40 static void __init spectre_v2_user_select_mitigation(void);
41 static void __init ssb_select_mitigation(void);
42 static void __init l1tf_select_mitigation(void);
43 static void __init mds_select_mitigation(void);
44 static void __init md_clear_update_mitigation(void);
45 static void __init md_clear_select_mitigation(void);
46 static void __init taa_select_mitigation(void);
47 static void __init mmio_select_mitigation(void);
48 static void __init srbds_select_mitigation(void);
49 static void __init l1d_flush_select_mitigation(void);
50 static void __init srso_select_mitigation(void);
51 static void __init gds_select_mitigation(void);
52
53 /* The base value of the SPEC_CTRL MSR without task-specific bits set */
54 u64 x86_spec_ctrl_base;
55 EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
56
57 /* The current value of the SPEC_CTRL MSR with task-specific bits set */
58 DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
59 EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);
60
61 u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
62 EXPORT_SYMBOL_GPL(x86_pred_cmd);
63
64 static DEFINE_MUTEX(spec_ctrl_mutex);
65
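/*
 * Pointer to the active return thunk.  Defaults to __x86_return_thunk; the
 * retbleed code below may repoint it, e.g. to retbleed_return_thunk for the
 * untrained return thunk or to call_depth_return_thunk for call depth
 * stuffing (see retbleed_select_mitigation()).
 */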
66 void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
67
68 /* Update SPEC_CTRL MSR and its cached copy unconditionally */
69 static void update_spec_ctrl(u64 val)
70 {
71         this_cpu_write(x86_spec_ctrl_current, val);
72         wrmsrl(MSR_IA32_SPEC_CTRL, val);
73 }
74
75 /*
76  * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
77  * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
78  */
79 void update_spec_ctrl_cond(u64 val)
80 {
81         if (this_cpu_read(x86_spec_ctrl_current) == val)
82                 return;
83
84         this_cpu_write(x86_spec_ctrl_current, val);
85
86         /*
87          * With KERNEL_IBRS, this MSR is written on return-to-user; unless
88          * forced, the update can be delayed until that time.
89          */
90         if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
91                 wrmsrl(MSR_IA32_SPEC_CTRL, val);
92 }
93
94 noinstr u64 spec_ctrl_current(void)
95 {
96         return this_cpu_read(x86_spec_ctrl_current);
97 }
98 EXPORT_SYMBOL_GPL(spec_ctrl_current);
99
100 /*
101  * AMD specific MSR info for Speculative Store Bypass control.
102  * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
103  */
104 u64 __ro_after_init x86_amd_ls_cfg_base;
105 u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
106
107 /* Control conditional STIBP in switch_to() */
108 DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
109 /* Control conditional IBPB in switch_mm() */
110 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
111 /* Control unconditional IBPB in switch_mm() */
112 DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
113
114 /* Control MDS CPU buffer clear before idling (halt, mwait) */
115 DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
116 EXPORT_SYMBOL_GPL(mds_idle_clear);
117
118 /*
119  * Controls whether L1D flush based mitigations are enabled, based on
120  * hardware features and the admin setting via the boot parameter;
121  * defaults to false.
122  */
123 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
124
125 /* Controls CPU Fill buffer clear before KVM guest MMIO accesses */
126 DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
127 EXPORT_SYMBOL_GPL(mmio_stale_data_clear);
128
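/*
 * Boot-time entry point for mitigation selection.  Called once, early in boot
 * (from arch_cpu_finalize_init() in current mainline) and before alternatives
 * are patched, so the setup_force_cpu_cap() calls made by the helpers below
 * still take effect during patching.
 */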
129 void __init cpu_select_mitigations(void)
130 {
131         /*
132          * Read the SPEC_CTRL MSR to account for reserved bits which may
133          * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
134          * init code as it is not enumerated and depends on the family.
135          */
136         if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) {
137                 rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
138
139                 /*
140                  * Previously running kernel (kexec), may have some controls
141                  * turned ON. Clear them and let the mitigations setup below
142                  * rediscover them based on configuration.
143                  */
144                 x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
145         }
146
147         /* Select the proper CPU mitigations before patching alternatives: */
148         spectre_v1_select_mitigation();
149         spectre_v2_select_mitigation();
150         /*
151          * retbleed_select_mitigation() relies on the state set by
152          * spectre_v2_select_mitigation(); specifically it wants to know about
153          * spectre_v2=ibrs.
154          */
155         retbleed_select_mitigation();
156         /*
157          * spectre_v2_user_select_mitigation() relies on the state set by
158          * retbleed_select_mitigation(); specifically the STIBP selection is
159          * forced for UNRET or IBPB.
160          */
161         spectre_v2_user_select_mitigation();
162         ssb_select_mitigation();
163         l1tf_select_mitigation();
164         md_clear_select_mitigation();
165         srbds_select_mitigation();
166         l1d_flush_select_mitigation();
167
168         /*
169          * srso_select_mitigation() depends on and must run after
170          * retbleed_select_mitigation().
171          */
172         srso_select_mitigation();
173         gds_select_mitigation();
174 }
175
176 /*
177  * NOTE: This function is *only* called for SVM, since Intel uses
178  * MSR_IA32_SPEC_CTRL for SSBD.
179  */
180 void
181 x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool setguest)
182 {
183         u64 guestval, hostval;
184         struct thread_info *ti = current_thread_info();
185
186         /*
187          * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
188          * MSR_AMD64_L2_CFG or MSR_VIRT_SPEC_CTRL if supported.
189          */
190         if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
191             !static_cpu_has(X86_FEATURE_VIRT_SSBD))
192                 return;
193
194         /*
195          * If the host has SSBD mitigation enabled, force it in the host's
196          * virtual MSR value. If it's not permanently enabled, evaluate
197          * current's TIF_SSBD thread flag.
198          */
199         if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
200                 hostval = SPEC_CTRL_SSBD;
201         else
202                 hostval = ssbd_tif_to_spec_ctrl(ti->flags);
203
204         /* Sanitize the guest value */
205         guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
206
207         if (hostval != guestval) {
208                 unsigned long tif;
209
210                 tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
211                                  ssbd_spec_ctrl_to_tif(hostval);
212
213                 speculation_ctrl_update(tif);
214         }
215 }
216 EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
217
218 static void x86_amd_ssb_disable(void)
219 {
220         u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
221
222         if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
223                 wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
224         else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
225                 wrmsrl(MSR_AMD64_LS_CFG, msrval);
226 }
227
228 #undef pr_fmt
229 #define pr_fmt(fmt)     "MDS: " fmt
230
231 /* Default mitigation for MDS-affected CPUs */
232 static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
233 static bool mds_nosmt __ro_after_init = false;
234
235 static const char * const mds_strings[] = {
236         [MDS_MITIGATION_OFF]    = "Vulnerable",
237         [MDS_MITIGATION_FULL]   = "Mitigation: Clear CPU buffers",
238         [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode",
239 };
240
241 static void __init mds_select_mitigation(void)
242 {
243         if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
244                 mds_mitigation = MDS_MITIGATION_OFF;
245                 return;
246         }
247
248         if (mds_mitigation == MDS_MITIGATION_FULL) {
249                 if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
250                         mds_mitigation = MDS_MITIGATION_VMWERV;
251
252                 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
253
254                 if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
255                     (mds_nosmt || cpu_mitigations_auto_nosmt()))
256                         cpu_smt_disable(false);
257         }
258 }
259
260 static int __init mds_cmdline(char *str)
261 {
262         if (!boot_cpu_has_bug(X86_BUG_MDS))
263                 return 0;
264
265         if (!str)
266                 return -EINVAL;
267
268         if (!strcmp(str, "off"))
269                 mds_mitigation = MDS_MITIGATION_OFF;
270         else if (!strcmp(str, "full"))
271                 mds_mitigation = MDS_MITIGATION_FULL;
272         else if (!strcmp(str, "full,nosmt")) {
273                 mds_mitigation = MDS_MITIGATION_FULL;
274                 mds_nosmt = true;
275         }
276
277         return 0;
278 }
279 early_param("mds", mds_cmdline);
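
/*
 * Example command line usage, per the parser above:
 *   mds=off         - leave affected CPUs vulnerable
 *   mds=full        - clear CPU buffers via VERW on exit to userspace (default)
 *   mds=full,nosmt  - additionally disable SMT, unless the CPU is only
 *                     affected by MSBDS
 */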
280
281 #undef pr_fmt
282 #define pr_fmt(fmt)     "TAA: " fmt
283
284 enum taa_mitigations {
285         TAA_MITIGATION_OFF,
286         TAA_MITIGATION_UCODE_NEEDED,
287         TAA_MITIGATION_VERW,
288         TAA_MITIGATION_TSX_DISABLED,
289 };
290
291 /* Default mitigation for TAA-affected CPUs */
292 static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
293 static bool taa_nosmt __ro_after_init;
294
295 static const char * const taa_strings[] = {
296         [TAA_MITIGATION_OFF]            = "Vulnerable",
297         [TAA_MITIGATION_UCODE_NEEDED]   = "Vulnerable: Clear CPU buffers attempted, no microcode",
298         [TAA_MITIGATION_VERW]           = "Mitigation: Clear CPU buffers",
299         [TAA_MITIGATION_TSX_DISABLED]   = "Mitigation: TSX disabled",
300 };
301
302 static void __init taa_select_mitigation(void)
303 {
304         u64 ia32_cap;
305
306         if (!boot_cpu_has_bug(X86_BUG_TAA)) {
307                 taa_mitigation = TAA_MITIGATION_OFF;
308                 return;
309         }
310
311         /* TSX previously disabled by tsx=off */
312         if (!boot_cpu_has(X86_FEATURE_RTM)) {
313                 taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
314                 return;
315         }
316
317         if (cpu_mitigations_off()) {
318                 taa_mitigation = TAA_MITIGATION_OFF;
319                 return;
320         }
321
322         /*
323          * TAA mitigation via VERW is turned off if both
324          * tsx_async_abort=off and mds=off are specified.
325          */
326         if (taa_mitigation == TAA_MITIGATION_OFF &&
327             mds_mitigation == MDS_MITIGATION_OFF)
328                 return;
329
330         if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
331                 taa_mitigation = TAA_MITIGATION_VERW;
332         else
333                 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
334
335         /*
336          * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
337          * A microcode update fixes this behavior to clear CPU buffers. It also
338          * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
339          * ARCH_CAP_TSX_CTRL_MSR bit.
340          *
341          * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
342          * update is required.
343          */
344         ia32_cap = x86_read_arch_cap_msr();
345         if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
346             !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
347                 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
348
349         /*
350          * TSX is enabled, select alternate mitigation for TAA which is
351          * the same as MDS. Enable MDS static branch to clear CPU buffers.
352          *
353          * For guests that can't determine whether the correct microcode is
354          * present on host, enable the mitigation for UCODE_NEEDED as well.
355          */
356         setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
357
358         if (taa_nosmt || cpu_mitigations_auto_nosmt())
359                 cpu_smt_disable(false);
360 }
361
362 static int __init tsx_async_abort_parse_cmdline(char *str)
363 {
364         if (!boot_cpu_has_bug(X86_BUG_TAA))
365                 return 0;
366
367         if (!str)
368                 return -EINVAL;
369
370         if (!strcmp(str, "off")) {
371                 taa_mitigation = TAA_MITIGATION_OFF;
372         } else if (!strcmp(str, "full")) {
373                 taa_mitigation = TAA_MITIGATION_VERW;
374         } else if (!strcmp(str, "full,nosmt")) {
375                 taa_mitigation = TAA_MITIGATION_VERW;
376                 taa_nosmt = true;
377         }
378
379         return 0;
380 }
381 early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
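
/*
 * Example command line usage, per the parser above:
 *   tsx_async_abort=off         - leave the CPU vulnerable to TAA
 *   tsx_async_abort=full        - clear CPU buffers via VERW (default)
 *   tsx_async_abort=full,nosmt  - additionally disable SMT
 */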
382
383 #undef pr_fmt
384 #define pr_fmt(fmt)     "MMIO Stale Data: " fmt
385
386 enum mmio_mitigations {
387         MMIO_MITIGATION_OFF,
388         MMIO_MITIGATION_UCODE_NEEDED,
389         MMIO_MITIGATION_VERW,
390 };
391
392 /* Default mitigation for Processor MMIO Stale Data vulnerabilities */
393 static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW;
394 static bool mmio_nosmt __ro_after_init = false;
395
396 static const char * const mmio_strings[] = {
397         [MMIO_MITIGATION_OFF]           = "Vulnerable",
398         [MMIO_MITIGATION_UCODE_NEEDED]  = "Vulnerable: Clear CPU buffers attempted, no microcode",
399         [MMIO_MITIGATION_VERW]          = "Mitigation: Clear CPU buffers",
400 };
401
402 static void __init mmio_select_mitigation(void)
403 {
404         u64 ia32_cap;
405
406         if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
407              boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
408              cpu_mitigations_off()) {
409                 mmio_mitigation = MMIO_MITIGATION_OFF;
410                 return;
411         }
412
413         if (mmio_mitigation == MMIO_MITIGATION_OFF)
414                 return;
415
416         ia32_cap = x86_read_arch_cap_msr();
417
418         /*
419          * Enable CPU buffer clear mitigation for host and VMM, if also affected
420          * by MDS or TAA. Otherwise, enable mitigation for VMM only.
421          */
422         if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
423                                               boot_cpu_has(X86_FEATURE_RTM)))
424                 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
425
426         /*
427          * X86_FEATURE_CLEAR_CPU_BUF could be enabled by other VERW based
428          * mitigations; disable the KVM-only mitigation in that case.
429          */
430         if (boot_cpu_has(X86_FEATURE_CLEAR_CPU_BUF))
431                 static_branch_disable(&mmio_stale_data_clear);
432         else
433                 static_branch_enable(&mmio_stale_data_clear);
434
435         /*
436          * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
437          * be propagated to uncore buffers, clearing the Fill buffers on idle
438          * is required irrespective of SMT state.
439          */
440         if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
441                 static_branch_enable(&mds_idle_clear);
442
443         /*
444          * Check if the system has the right microcode.
445          *
446          * CPU Fill buffer clear mitigation is enumerated by either an explicit
447          * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
448          * affected systems.
449          */
450         if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
451             (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
452              boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
453              !(ia32_cap & ARCH_CAP_MDS_NO)))
454                 mmio_mitigation = MMIO_MITIGATION_VERW;
455         else
456                 mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
457
458         if (mmio_nosmt || cpu_mitigations_auto_nosmt())
459                 cpu_smt_disable(false);
460 }
461
462 static int __init mmio_stale_data_parse_cmdline(char *str)
463 {
464         if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
465                 return 0;
466
467         if (!str)
468                 return -EINVAL;
469
470         if (!strcmp(str, "off")) {
471                 mmio_mitigation = MMIO_MITIGATION_OFF;
472         } else if (!strcmp(str, "full")) {
473                 mmio_mitigation = MMIO_MITIGATION_VERW;
474         } else if (!strcmp(str, "full,nosmt")) {
475                 mmio_mitigation = MMIO_MITIGATION_VERW;
476                 mmio_nosmt = true;
477         }
478
479         return 0;
480 }
481 early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);
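
/*
 * Example command line usage, per the parser above:
 *   mmio_stale_data=off         - disable the mitigation
 *   mmio_stale_data=full        - clear CPU buffers via VERW (default)
 *   mmio_stale_data=full,nosmt  - additionally disable SMT
 */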
482
483 #undef pr_fmt
484 #define pr_fmt(fmt)     "Register File Data Sampling: " fmt
485
486 enum rfds_mitigations {
487         RFDS_MITIGATION_OFF,
488         RFDS_MITIGATION_VERW,
489         RFDS_MITIGATION_UCODE_NEEDED,
490 };
491
492 /* Default mitigation for Register File Data Sampling */
493 static enum rfds_mitigations rfds_mitigation __ro_after_init =
494         IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_VERW : RFDS_MITIGATION_OFF;
495
496 static const char * const rfds_strings[] = {
497         [RFDS_MITIGATION_OFF]                   = "Vulnerable",
498         [RFDS_MITIGATION_VERW]                  = "Mitigation: Clear Register File",
499         [RFDS_MITIGATION_UCODE_NEEDED]          = "Vulnerable: No microcode",
500 };
501
502 static void __init rfds_select_mitigation(void)
503 {
504         if (!boot_cpu_has_bug(X86_BUG_RFDS) || cpu_mitigations_off()) {
505                 rfds_mitigation = RFDS_MITIGATION_OFF;
506                 return;
507         }
508         if (rfds_mitigation == RFDS_MITIGATION_OFF)
509                 return;
510
511         if (x86_read_arch_cap_msr() & ARCH_CAP_RFDS_CLEAR)
512                 setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
513         else
514                 rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
515 }
516
517 static __init int rfds_parse_cmdline(char *str)
518 {
519         if (!str)
520                 return -EINVAL;
521
522         if (!boot_cpu_has_bug(X86_BUG_RFDS))
523                 return 0;
524
525         if (!strcmp(str, "off"))
526                 rfds_mitigation = RFDS_MITIGATION_OFF;
527         else if (!strcmp(str, "on"))
528                 rfds_mitigation = RFDS_MITIGATION_VERW;
529
530         return 0;
531 }
532 early_param("reg_file_data_sampling", rfds_parse_cmdline);
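
/*
 * Example command line usage, per the parser above:
 *   reg_file_data_sampling=off  - disable the RFDS mitigation
 *   reg_file_data_sampling=on   - use VERW based clearing; needs microcode
 *                                 advertising RFDS_CLEAR (default when
 *                                 CONFIG_MITIGATION_RFDS=y)
 */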
533
534 #undef pr_fmt
535 #define pr_fmt(fmt)     "" fmt
536
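/*
 * Reconcile the VERW based mitigations: if any of the selectors above enabled
 * X86_FEATURE_CLEAR_CPU_BUF, the buffer clearing covers the related bugs as
 * well, so re-run their selection to reflect that, then print the final state
 * for each bug.
 */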
537 static void __init md_clear_update_mitigation(void)
538 {
539         if (cpu_mitigations_off())
540                 return;
541
542         if (!boot_cpu_has(X86_FEATURE_CLEAR_CPU_BUF))
543                 goto out;
544
545         /*
546          * X86_FEATURE_CLEAR_CPU_BUF is now enabled. Update MDS, TAA and MMIO
547          * Stale Data mitigation, if necessary.
548          */
549         if (mds_mitigation == MDS_MITIGATION_OFF &&
550             boot_cpu_has_bug(X86_BUG_MDS)) {
551                 mds_mitigation = MDS_MITIGATION_FULL;
552                 mds_select_mitigation();
553         }
554         if (taa_mitigation == TAA_MITIGATION_OFF &&
555             boot_cpu_has_bug(X86_BUG_TAA)) {
556                 taa_mitigation = TAA_MITIGATION_VERW;
557                 taa_select_mitigation();
558         }
559         /*
560          * MMIO_MITIGATION_OFF is not checked here so that mmio_stale_data_clear
561          * gets updated correctly as per X86_FEATURE_CLEAR_CPU_BUF state.
562          */
563         if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
564                 mmio_mitigation = MMIO_MITIGATION_VERW;
565                 mmio_select_mitigation();
566         }
567         if (rfds_mitigation == RFDS_MITIGATION_OFF &&
568             boot_cpu_has_bug(X86_BUG_RFDS)) {
569                 rfds_mitigation = RFDS_MITIGATION_VERW;
570                 rfds_select_mitigation();
571         }
572 out:
573         if (boot_cpu_has_bug(X86_BUG_MDS))
574                 pr_info("MDS: %s\n", mds_strings[mds_mitigation]);
575         if (boot_cpu_has_bug(X86_BUG_TAA))
576                 pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
577         if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
578                 pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
579         else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
580                 pr_info("MMIO Stale Data: Unknown: No mitigations\n");
581         if (boot_cpu_has_bug(X86_BUG_RFDS))
582                 pr_info("Register File Data Sampling: %s\n", rfds_strings[rfds_mitigation]);
583 }
584
585 static void __init md_clear_select_mitigation(void)
586 {
587         mds_select_mitigation();
588         taa_select_mitigation();
589         mmio_select_mitigation();
590         rfds_select_mitigation();
591
592         /*
593          * As these mitigations are inter-related and rely on VERW instruction
594          * to clear the microarchitectural buffers, update and print their status
595          * after mitigation selection is done for each of these vulnerabilities.
596          */
597         md_clear_update_mitigation();
598 }
599
600 #undef pr_fmt
601 #define pr_fmt(fmt)     "SRBDS: " fmt
602
603 enum srbds_mitigations {
604         SRBDS_MITIGATION_OFF,
605         SRBDS_MITIGATION_UCODE_NEEDED,
606         SRBDS_MITIGATION_FULL,
607         SRBDS_MITIGATION_TSX_OFF,
608         SRBDS_MITIGATION_HYPERVISOR,
609 };
610
611 static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL;
612
613 static const char * const srbds_strings[] = {
614         [SRBDS_MITIGATION_OFF]          = "Vulnerable",
615         [SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
616         [SRBDS_MITIGATION_FULL]         = "Mitigation: Microcode",
617         [SRBDS_MITIGATION_TSX_OFF]      = "Mitigation: TSX disabled",
618         [SRBDS_MITIGATION_HYPERVISOR]   = "Unknown: Dependent on hypervisor status",
619 };
620
621 static bool srbds_off;
622
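/*
 * Program the SRBDS control (RNGDS_MITG_DIS in MSR_IA32_MCU_OPT_CTRL) on the
 * current CPU according to the selected mitigation.  Called from
 * srbds_select_mitigation() below and, in current mainline, from the
 * secondary CPU identification path so APs end up with the same setting.
 */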
623 void update_srbds_msr(void)
624 {
625         u64 mcu_ctrl;
626
627         if (!boot_cpu_has_bug(X86_BUG_SRBDS))
628                 return;
629
630         if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
631                 return;
632
633         if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
634                 return;
635
636         /*
637          * An MDS_NO CPU for which SRBDS mitigation is not needed due to TSX
638          * being disabled may not have received the SRBDS MSR microcode.
639          */
640         if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
641                 return;
642
643         rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
644
645         switch (srbds_mitigation) {
646         case SRBDS_MITIGATION_OFF:
647         case SRBDS_MITIGATION_TSX_OFF:
648                 mcu_ctrl |= RNGDS_MITG_DIS;
649                 break;
650         case SRBDS_MITIGATION_FULL:
651                 mcu_ctrl &= ~RNGDS_MITG_DIS;
652                 break;
653         default:
654                 break;
655         }
656
657         wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
658 }
659
660 static void __init srbds_select_mitigation(void)
661 {
662         u64 ia32_cap;
663
664         if (!boot_cpu_has_bug(X86_BUG_SRBDS))
665                 return;
666
667         /*
668          * Check to see if this is one of the MDS_NO systems supporting TSX that
669          * are only exposed to SRBDS when TSX is enabled or when CPU is affected
670          * by Processor MMIO Stale Data vulnerability.
671          */
672         ia32_cap = x86_read_arch_cap_msr();
673         if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
674             !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
675                 srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
676         else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
677                 srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
678         else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
679                 srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
680         else if (cpu_mitigations_off() || srbds_off)
681                 srbds_mitigation = SRBDS_MITIGATION_OFF;
682
683         update_srbds_msr();
684         pr_info("%s\n", srbds_strings[srbds_mitigation]);
685 }
686
687 static int __init srbds_parse_cmdline(char *str)
688 {
689         if (!str)
690                 return -EINVAL;
691
692         if (!boot_cpu_has_bug(X86_BUG_SRBDS))
693                 return 0;
694
695         srbds_off = !strcmp(str, "off");
696         return 0;
697 }
698 early_param("srbds", srbds_parse_cmdline);
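
/*
 * Example command line usage, per the parser above: "srbds=off" is the only
 * recognized value and disables the SRBDS mitigation on affected CPUs.
 */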
699
700 #undef pr_fmt
701 #define pr_fmt(fmt)     "L1D Flush : " fmt
702
703 enum l1d_flush_mitigations {
704         L1D_FLUSH_OFF = 0,
705         L1D_FLUSH_ON,
706 };
707
708 static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF;
709
710 static void __init l1d_flush_select_mitigation(void)
711 {
712         if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D))
713                 return;
714
715         static_branch_enable(&switch_mm_cond_l1d_flush);
716         pr_info("Conditional flush on switch_mm() enabled\n");
717 }
718
719 static int __init l1d_flush_parse_cmdline(char *str)
720 {
721         if (!strcmp(str, "on"))
722                 l1d_flush_mitigation = L1D_FLUSH_ON;
723
724         return 0;
725 }
726 early_param("l1d_flush", l1d_flush_parse_cmdline);
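
/*
 * Example command line usage, per the parser above: "l1d_flush=on" enables the
 * conditional L1D flush infrastructure on CPUs with X86_FEATURE_FLUSH_L1D;
 * individual tasks then opt in via prctl(PR_SET_SPECULATION_CTRL,
 * PR_SPEC_L1D_FLUSH, ...).
 */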
727
728 #undef pr_fmt
729 #define pr_fmt(fmt)     "GDS: " fmt
730
731 enum gds_mitigations {
732         GDS_MITIGATION_OFF,
733         GDS_MITIGATION_UCODE_NEEDED,
734         GDS_MITIGATION_FORCE,
735         GDS_MITIGATION_FULL,
736         GDS_MITIGATION_FULL_LOCKED,
737         GDS_MITIGATION_HYPERVISOR,
738 };
739
740 #if IS_ENABLED(CONFIG_MITIGATION_GDS_FORCE)
741 static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FORCE;
742 #else
743 static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FULL;
744 #endif
745
746 static const char * const gds_strings[] = {
747         [GDS_MITIGATION_OFF]            = "Vulnerable",
748         [GDS_MITIGATION_UCODE_NEEDED]   = "Vulnerable: No microcode",
749         [GDS_MITIGATION_FORCE]          = "Mitigation: AVX disabled, no microcode",
750         [GDS_MITIGATION_FULL]           = "Mitigation: Microcode",
751         [GDS_MITIGATION_FULL_LOCKED]    = "Mitigation: Microcode (locked)",
752         [GDS_MITIGATION_HYPERVISOR]     = "Unknown: Dependent on hypervisor status",
753 };
754
755 bool gds_ucode_mitigated(void)
756 {
757         return (gds_mitigation == GDS_MITIGATION_FULL ||
758                 gds_mitigation == GDS_MITIGATION_FULL_LOCKED);
759 }
760 EXPORT_SYMBOL_GPL(gds_ucode_mitigated);
761
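/*
 * Program GDS_MITG_DIS in MSR_IA32_MCU_OPT_CTRL on the current CPU to match
 * the selected mitigation and verify that the write took effect; the write is
 * silently ignored when the microcode has locked the control on this CPU but
 * not on the boot CPU.  Called from gds_select_mitigation() below and, in
 * current mainline, for each AP during CPU identification.
 */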
762 void update_gds_msr(void)
763 {
764         u64 mcu_ctrl_after;
765         u64 mcu_ctrl;
766
767         switch (gds_mitigation) {
768         case GDS_MITIGATION_OFF:
769                 rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
770                 mcu_ctrl |= GDS_MITG_DIS;
771                 break;
772         case GDS_MITIGATION_FULL_LOCKED:
773                 /*
774                  * The LOCKED state comes from the boot CPU. APs might not have
775                  * the same state. Make sure the mitigation is enabled on all
776                  * CPUs.
777                  */
778         case GDS_MITIGATION_FULL:
779                 rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
780                 mcu_ctrl &= ~GDS_MITG_DIS;
781                 break;
782         case GDS_MITIGATION_FORCE:
783         case GDS_MITIGATION_UCODE_NEEDED:
784         case GDS_MITIGATION_HYPERVISOR:
785                 return;
786         }
787
788         wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
789
790         /*
791          * Check to make sure that the WRMSR value was not ignored. Writes to
792          * GDS_MITG_DIS will be ignored if this processor is locked but the boot
793          * processor was not.
794          */
795         rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after);
796         WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after);
797 }
798
799 static void __init gds_select_mitigation(void)
800 {
801         u64 mcu_ctrl;
802
803         if (!boot_cpu_has_bug(X86_BUG_GDS))
804                 return;
805
806         if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
807                 gds_mitigation = GDS_MITIGATION_HYPERVISOR;
808                 goto out;
809         }
810
811         if (cpu_mitigations_off())
812                 gds_mitigation = GDS_MITIGATION_OFF;
813         /* Will verify below that mitigation _can_ be disabled */
814
815         /* No microcode */
816         if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) {
817                 if (gds_mitigation == GDS_MITIGATION_FORCE) {
818                         /*
819                          * This only needs to be done on the boot CPU so do it
820                          * here rather than in update_gds_msr()
821                          */
822                         setup_clear_cpu_cap(X86_FEATURE_AVX);
823                         pr_warn("Microcode update needed! Disabling AVX as mitigation.\n");
824                 } else {
825                         gds_mitigation = GDS_MITIGATION_UCODE_NEEDED;
826                 }
827                 goto out;
828         }
829
830         /* Microcode has mitigation, use it */
831         if (gds_mitigation == GDS_MITIGATION_FORCE)
832                 gds_mitigation = GDS_MITIGATION_FULL;
833
834         rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
835         if (mcu_ctrl & GDS_MITG_LOCKED) {
836                 if (gds_mitigation == GDS_MITIGATION_OFF)
837                         pr_warn("Mitigation locked. Disable failed.\n");
838
839                 /*
840                  * The mitigation is selected from the boot CPU. All other CPUs
841                  * _should_ have the same state. If the boot CPU isn't locked
842                  * but others are then update_gds_msr() will WARN() of the state
843                  * mismatch. If the boot CPU is locked update_gds_msr() will
844                  * ensure the other CPUs have the mitigation enabled.
845                  */
846                 gds_mitigation = GDS_MITIGATION_FULL_LOCKED;
847         }
848
849         update_gds_msr();
850 out:
851         pr_info("%s\n", gds_strings[gds_mitigation]);
852 }
853
854 static int __init gds_parse_cmdline(char *str)
855 {
856         if (!str)
857                 return -EINVAL;
858
859         if (!boot_cpu_has_bug(X86_BUG_GDS))
860                 return 0;
861
862         if (!strcmp(str, "off"))
863                 gds_mitigation = GDS_MITIGATION_OFF;
864         else if (!strcmp(str, "force"))
865                 gds_mitigation = GDS_MITIGATION_FORCE;
866
867         return 0;
868 }
869 early_param("gather_data_sampling", gds_parse_cmdline);
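
/*
 * Example command line usage, per the parser above:
 *   gather_data_sampling=off    - disable the mitigation (fails if the
 *                                 microcode has locked it on)
 *   gather_data_sampling=force  - without updated microcode, disable AVX as
 *                                 a fallback mitigation
 */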
870
871 #undef pr_fmt
872 #define pr_fmt(fmt)     "Spectre V1 : " fmt
873
874 enum spectre_v1_mitigation {
875         SPECTRE_V1_MITIGATION_NONE,
876         SPECTRE_V1_MITIGATION_AUTO,
877 };
878
879 static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
880         SPECTRE_V1_MITIGATION_AUTO;
881
882 static const char * const spectre_v1_strings[] = {
883         [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
884         [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
885 };
886
887 /*
888  * Does SMAP provide full mitigation against speculative kernel access to
889  * userspace?
890  */
891 static bool smap_works_speculatively(void)
892 {
893         if (!boot_cpu_has(X86_FEATURE_SMAP))
894                 return false;
895
896         /*
897          * On CPUs which are vulnerable to Meltdown, SMAP does not
898          * prevent speculative access to user data in the L1 cache.
899          * Consider SMAP to be non-functional as a mitigation on these
900          * CPUs.
901          */
902         if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
903                 return false;
904
905         return true;
906 }
907
908 static void __init spectre_v1_select_mitigation(void)
909 {
910         if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
911                 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
912                 return;
913         }
914
915         if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
916                 /*
917                  * With Spectre v1, a user can speculatively control either
918                  * path of a conditional swapgs with a user-controlled GS
919                  * value.  The mitigation is to add lfences to both code paths.
920                  *
921                  * If FSGSBASE is enabled, the user can put a kernel address in
922                  * GS, in which case SMAP provides no protection.
923                  *
924                  * If FSGSBASE is disabled, the user can only put a user space
925                  * address in GS.  That makes an attack harder, but still
926                  * possible if there's no SMAP protection.
927                  */
928                 if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
929                     !smap_works_speculatively()) {
930                         /*
931                          * Mitigation can be provided from SWAPGS itself or
932                          * PTI as the CR3 write in the Meltdown mitigation
933                          * is serializing.
934                          *
935                          * If neither is there, mitigate with an LFENCE to
936                          * stop speculation through swapgs.
937                          */
938                         if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
939                             !boot_cpu_has(X86_FEATURE_PTI))
940                                 setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
941
942                         /*
943                          * Enable lfences in the kernel entry (non-swapgs)
944                          * paths, to prevent user entry from speculatively
945                          * skipping swapgs.
946                          */
947                         setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
948                 }
949         }
950
951         pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
952 }
953
954 static int __init nospectre_v1_cmdline(char *str)
955 {
956         spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
957         return 0;
958 }
959 early_param("nospectre_v1", nospectre_v1_cmdline);
960
961 enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE;
962
963 #undef pr_fmt
964 #define pr_fmt(fmt)     "RETBleed: " fmt
965
966 enum retbleed_mitigation {
967         RETBLEED_MITIGATION_NONE,
968         RETBLEED_MITIGATION_UNRET,
969         RETBLEED_MITIGATION_IBPB,
970         RETBLEED_MITIGATION_IBRS,
971         RETBLEED_MITIGATION_EIBRS,
972         RETBLEED_MITIGATION_STUFF,
973 };
974
975 enum retbleed_mitigation_cmd {
976         RETBLEED_CMD_OFF,
977         RETBLEED_CMD_AUTO,
978         RETBLEED_CMD_UNRET,
979         RETBLEED_CMD_IBPB,
980         RETBLEED_CMD_STUFF,
981 };
982
983 static const char * const retbleed_strings[] = {
984         [RETBLEED_MITIGATION_NONE]      = "Vulnerable",
985         [RETBLEED_MITIGATION_UNRET]     = "Mitigation: untrained return thunk",
986         [RETBLEED_MITIGATION_IBPB]      = "Mitigation: IBPB",
987         [RETBLEED_MITIGATION_IBRS]      = "Mitigation: IBRS",
988         [RETBLEED_MITIGATION_EIBRS]     = "Mitigation: Enhanced IBRS",
989         [RETBLEED_MITIGATION_STUFF]     = "Mitigation: Stuffing",
990 };
991
992 static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
993         RETBLEED_MITIGATION_NONE;
994 static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init =
995         RETBLEED_CMD_AUTO;
996
997 static int __ro_after_init retbleed_nosmt = false;
998
999 static int __init retbleed_parse_cmdline(char *str)
1000 {
1001         if (!str)
1002                 return -EINVAL;
1003
1004         while (str) {
1005                 char *next = strchr(str, ',');
1006                 if (next) {
1007                         *next = 0;
1008                         next++;
1009                 }
1010
1011                 if (!strcmp(str, "off")) {
1012                         retbleed_cmd = RETBLEED_CMD_OFF;
1013                 } else if (!strcmp(str, "auto")) {
1014                         retbleed_cmd = RETBLEED_CMD_AUTO;
1015                 } else if (!strcmp(str, "unret")) {
1016                         retbleed_cmd = RETBLEED_CMD_UNRET;
1017                 } else if (!strcmp(str, "ibpb")) {
1018                         retbleed_cmd = RETBLEED_CMD_IBPB;
1019                 } else if (!strcmp(str, "stuff")) {
1020                         retbleed_cmd = RETBLEED_CMD_STUFF;
1021                 } else if (!strcmp(str, "nosmt")) {
1022                         retbleed_nosmt = true;
1023                 } else if (!strcmp(str, "force")) {
1024                         setup_force_cpu_bug(X86_BUG_RETBLEED);
1025                 } else {
1026                         pr_err("Ignoring unknown retbleed option (%s).\n", str);
1027                 }
1028
1029                 str = next;
1030         }
1031
1032         return 0;
1033 }
1034 early_param("retbleed", retbleed_parse_cmdline);
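
/*
 * Example command line usage, per the parser above (options can be combined
 * with commas, e.g. "retbleed=ibpb,nosmt"):
 *   off | auto | unret | ibpb | stuff  - select the mitigation mode
 *   nosmt                              - also disable SMT when STIBP is absent
 *   force                              - treat the CPU as affected regardless
 */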
1035
1036 #define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
1037 #define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"
1038
1039 static void __init retbleed_select_mitigation(void)
1040 {
1041         bool mitigate_smt = false;
1042
1043         if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
1044                 return;
1045
1046         switch (retbleed_cmd) {
1047         case RETBLEED_CMD_OFF:
1048                 return;
1049
1050         case RETBLEED_CMD_UNRET:
1051                 if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY)) {
1052                         retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
1053                 } else {
1054                         pr_err("WARNING: kernel not compiled with MITIGATION_UNRET_ENTRY.\n");
1055                         goto do_cmd_auto;
1056                 }
1057                 break;
1058
1059         case RETBLEED_CMD_IBPB:
1060                 if (!boot_cpu_has(X86_FEATURE_IBPB)) {
1061                         pr_err("WARNING: CPU does not support IBPB.\n");
1062                         goto do_cmd_auto;
1063                 } else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
1064                         retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
1065                 } else {
1066                         pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
1067                         goto do_cmd_auto;
1068                 }
1069                 break;
1070
1071         case RETBLEED_CMD_STUFF:
1072                 if (IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING) &&
1073                     spectre_v2_enabled == SPECTRE_V2_RETPOLINE) {
1074                         retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
1075
1076                 } else {
1077                         if (IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING))
1078                                 pr_err("WARNING: retbleed=stuff depends on spectre_v2=retpoline\n");
1079                         else
1080                                 pr_err("WARNING: kernel not compiled with MITIGATION_CALL_DEPTH_TRACKING.\n");
1081
1082                         goto do_cmd_auto;
1083                 }
1084                 break;
1085
1086 do_cmd_auto:
1087         case RETBLEED_CMD_AUTO:
1088                 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
1089                     boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
1090                         if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY))
1091                                 retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
1092                         else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY) &&
1093                                  boot_cpu_has(X86_FEATURE_IBPB))
1094                                 retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
1095                 }
1096
1097                 /*
1098                  * The Intel mitigation (IBRS or eIBRS) was already selected in
1099                  * spectre_v2_select_mitigation().  'retbleed_mitigation' will
1100                  * be set accordingly below.
1101                  */
1102
1103                 break;
1104         }
1105
1106         switch (retbleed_mitigation) {
1107         case RETBLEED_MITIGATION_UNRET:
1108                 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1109                 setup_force_cpu_cap(X86_FEATURE_UNRET);
1110
1111                 x86_return_thunk = retbleed_return_thunk;
1112
1113                 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
1114                     boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
1115                         pr_err(RETBLEED_UNTRAIN_MSG);
1116
1117                 mitigate_smt = true;
1118                 break;
1119
1120         case RETBLEED_MITIGATION_IBPB:
1121                 setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
1122                 setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
1123                 mitigate_smt = true;
1124                 break;
1125
1126         case RETBLEED_MITIGATION_STUFF:
1127                 setup_force_cpu_cap(X86_FEATURE_RETHUNK);
1128                 setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
1129
1130                 x86_return_thunk = call_depth_return_thunk;
1131                 break;
1132
1133         default:
1134                 break;
1135         }
1136
1137         if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
1138             (retbleed_nosmt || cpu_mitigations_auto_nosmt()))
1139                 cpu_smt_disable(false);
1140
1141         /*
1142          * Let IBRS trump all on Intel without overriding the effect of the
1143          * retbleed= cmdline option except for call depth based stuffing
1144          */
1145         if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
1146                 switch (spectre_v2_enabled) {
1147                 case SPECTRE_V2_IBRS:
1148                         retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
1149                         break;
1150                 case SPECTRE_V2_EIBRS:
1151                 case SPECTRE_V2_EIBRS_RETPOLINE:
1152                 case SPECTRE_V2_EIBRS_LFENCE:
1153                         retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
1154                         break;
1155                 default:
1156                         if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
1157                                 pr_err(RETBLEED_INTEL_MSG);
1158                 }
1159         }
1160
1161         pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
1162 }
1163
1164 #undef pr_fmt
1165 #define pr_fmt(fmt)     "Spectre V2 : " fmt
1166
1167 static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
1168         SPECTRE_V2_USER_NONE;
1169 static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
1170         SPECTRE_V2_USER_NONE;
1171
1172 #ifdef CONFIG_MITIGATION_RETPOLINE
1173 static bool spectre_v2_bad_module;
1174
1175 bool retpoline_module_ok(bool has_retpoline)
1176 {
1177         if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
1178                 return true;
1179
1180         pr_err("System may be vulnerable to spectre v2\n");
1181         spectre_v2_bad_module = true;
1182         return false;
1183 }
1184
1185 static inline const char *spectre_v2_module_string(void)
1186 {
1187         return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
1188 }
1189 #else
1190 static inline const char *spectre_v2_module_string(void) { return ""; }
1191 #endif
1192
1193 #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
1194 #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
1195 #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
1196 #define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"
1197
1198 #ifdef CONFIG_BPF_SYSCALL
1199 void unpriv_ebpf_notify(int new_state)
1200 {
1201         if (new_state)
1202                 return;
1203
1204         /* Unprivileged eBPF is enabled */
1205
1206         switch (spectre_v2_enabled) {
1207         case SPECTRE_V2_EIBRS:
1208                 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
1209                 break;
1210         case SPECTRE_V2_EIBRS_LFENCE:
1211                 if (sched_smt_active())
1212                         pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
1213                 break;
1214         default:
1215                 break;
1216         }
1217 }
1218 #endif
1219
1220 static inline bool match_option(const char *arg, int arglen, const char *opt)
1221 {
1222         int len = strlen(opt);
1223
1224         return len == arglen && !strncmp(arg, opt, len);
1225 }
1226
1227 /* The kernel command line selection for spectre v2 */
1228 enum spectre_v2_mitigation_cmd {
1229         SPECTRE_V2_CMD_NONE,
1230         SPECTRE_V2_CMD_AUTO,
1231         SPECTRE_V2_CMD_FORCE,
1232         SPECTRE_V2_CMD_RETPOLINE,
1233         SPECTRE_V2_CMD_RETPOLINE_GENERIC,
1234         SPECTRE_V2_CMD_RETPOLINE_LFENCE,
1235         SPECTRE_V2_CMD_EIBRS,
1236         SPECTRE_V2_CMD_EIBRS_RETPOLINE,
1237         SPECTRE_V2_CMD_EIBRS_LFENCE,
1238         SPECTRE_V2_CMD_IBRS,
1239 };
1240
1241 enum spectre_v2_user_cmd {
1242         SPECTRE_V2_USER_CMD_NONE,
1243         SPECTRE_V2_USER_CMD_AUTO,
1244         SPECTRE_V2_USER_CMD_FORCE,
1245         SPECTRE_V2_USER_CMD_PRCTL,
1246         SPECTRE_V2_USER_CMD_PRCTL_IBPB,
1247         SPECTRE_V2_USER_CMD_SECCOMP,
1248         SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
1249 };
1250
1251 static const char * const spectre_v2_user_strings[] = {
1252         [SPECTRE_V2_USER_NONE]                  = "User space: Vulnerable",
1253         [SPECTRE_V2_USER_STRICT]                = "User space: Mitigation: STIBP protection",
1254         [SPECTRE_V2_USER_STRICT_PREFERRED]      = "User space: Mitigation: STIBP always-on protection",
1255         [SPECTRE_V2_USER_PRCTL]                 = "User space: Mitigation: STIBP via prctl",
1256         [SPECTRE_V2_USER_SECCOMP]               = "User space: Mitigation: STIBP via seccomp and prctl",
1257 };
1258
1259 static const struct {
1260         const char                      *option;
1261         enum spectre_v2_user_cmd        cmd;
1262         bool                            secure;
1263 } v2_user_options[] __initconst = {
1264         { "auto",               SPECTRE_V2_USER_CMD_AUTO,               false },
1265         { "off",                SPECTRE_V2_USER_CMD_NONE,               false },
1266         { "on",                 SPECTRE_V2_USER_CMD_FORCE,              true  },
1267         { "prctl",              SPECTRE_V2_USER_CMD_PRCTL,              false },
1268         { "prctl,ibpb",         SPECTRE_V2_USER_CMD_PRCTL_IBPB,         false },
1269         { "seccomp",            SPECTRE_V2_USER_CMD_SECCOMP,            false },
1270         { "seccomp,ibpb",       SPECTRE_V2_USER_CMD_SECCOMP_IBPB,       false },
1271 };
1272
1273 static void __init spec_v2_user_print_cond(const char *reason, bool secure)
1274 {
1275         if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
1276                 pr_info("spectre_v2_user=%s forced on command line.\n", reason);
1277 }
1278
1279 static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd;
1280
1281 static enum spectre_v2_user_cmd __init
1282 spectre_v2_parse_user_cmdline(void)
1283 {
1284         char arg[20];
1285         int ret, i;
1286
1287         switch (spectre_v2_cmd) {
1288         case SPECTRE_V2_CMD_NONE:
1289                 return SPECTRE_V2_USER_CMD_NONE;
1290         case SPECTRE_V2_CMD_FORCE:
1291                 return SPECTRE_V2_USER_CMD_FORCE;
1292         default:
1293                 break;
1294         }
1295
1296         ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
1297                                   arg, sizeof(arg));
1298         if (ret < 0)
1299                 return SPECTRE_V2_USER_CMD_AUTO;
1300
1301         for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
1302                 if (match_option(arg, ret, v2_user_options[i].option)) {
1303                         spec_v2_user_print_cond(v2_user_options[i].option,
1304                                                 v2_user_options[i].secure);
1305                         return v2_user_options[i].cmd;
1306                 }
1307         }
1308
1309         pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
1310         return SPECTRE_V2_USER_CMD_AUTO;
1311 }
1312
1313 static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
1314 {
1315         return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS;
1316 }
1317
1318 static void __init
1319 spectre_v2_user_select_mitigation(void)
1320 {
1321         enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
1322         bool smt_possible = IS_ENABLED(CONFIG_SMP);
1323         enum spectre_v2_user_cmd cmd;
1324
1325         if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
1326                 return;
1327
1328         if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
1329             cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
1330                 smt_possible = false;
1331
1332         cmd = spectre_v2_parse_user_cmdline();
1333         switch (cmd) {
1334         case SPECTRE_V2_USER_CMD_NONE:
1335                 goto set_mode;
1336         case SPECTRE_V2_USER_CMD_FORCE:
1337                 mode = SPECTRE_V2_USER_STRICT;
1338                 break;
1339         case SPECTRE_V2_USER_CMD_AUTO:
1340         case SPECTRE_V2_USER_CMD_PRCTL:
1341         case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
1342                 mode = SPECTRE_V2_USER_PRCTL;
1343                 break;
1344         case SPECTRE_V2_USER_CMD_SECCOMP:
1345         case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
1346                 if (IS_ENABLED(CONFIG_SECCOMP))
1347                         mode = SPECTRE_V2_USER_SECCOMP;
1348                 else
1349                         mode = SPECTRE_V2_USER_PRCTL;
1350                 break;
1351         }
1352
1353         /* Initialize Indirect Branch Prediction Barrier */
1354         if (boot_cpu_has(X86_FEATURE_IBPB)) {
1355                 setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
1356
1357                 spectre_v2_user_ibpb = mode;
1358                 switch (cmd) {
1359                 case SPECTRE_V2_USER_CMD_NONE:
1360                         break;
1361                 case SPECTRE_V2_USER_CMD_FORCE:
1362                 case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
1363                 case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
1364                         static_branch_enable(&switch_mm_always_ibpb);
1365                         spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1366                         break;
1367                 case SPECTRE_V2_USER_CMD_PRCTL:
1368                 case SPECTRE_V2_USER_CMD_AUTO:
1369                 case SPECTRE_V2_USER_CMD_SECCOMP:
1370                         static_branch_enable(&switch_mm_cond_ibpb);
1371                         break;
1372                 }
1373
1374                 pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
1375                         static_key_enabled(&switch_mm_always_ibpb) ?
1376                         "always-on" : "conditional");
1377         }
1378
1379         /*
1380          * If no STIBP, Intel enhanced IBRS is enabled, or SMT impossible, STIBP
1381          * is not required.
1382          *
1383          * Intel's Enhanced IBRS also protects against cross-thread branch target
1384          * injection in user-mode as the IBRS bit remains always set which
1385          * implicitly enables cross-thread protections.  However, in legacy IBRS
1386          * mode, the IBRS bit is set only on kernel entry and cleared on return
1387          * to userspace.  AMD Automatic IBRS also does not protect userspace.
1388          * These modes therefore disable the implicit cross-thread protection,
1389          * so allow for STIBP to be selected in those cases.
1390          */
1391         if (!boot_cpu_has(X86_FEATURE_STIBP) ||
1392             !smt_possible ||
1393             (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
1394              !boot_cpu_has(X86_FEATURE_AUTOIBRS)))
1395                 return;
1396
1397         /*
1398          * At this point, an STIBP mode other than "off" has been set.
1399          * If STIBP support is not being forced, check if STIBP always-on
1400          * is preferred.
1401          */
1402         if (mode != SPECTRE_V2_USER_STRICT &&
1403             boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
1404                 mode = SPECTRE_V2_USER_STRICT_PREFERRED;
1405
1406         if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
1407             retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
1408                 if (mode != SPECTRE_V2_USER_STRICT &&
1409                     mode != SPECTRE_V2_USER_STRICT_PREFERRED)
1410                         pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n");
1411                 mode = SPECTRE_V2_USER_STRICT_PREFERRED;
1412         }
1413
1414         spectre_v2_user_stibp = mode;
1415
1416 set_mode:
1417         pr_info("%s\n", spectre_v2_user_strings[mode]);
1418 }
1419
1420 static const char * const spectre_v2_strings[] = {
1421         [SPECTRE_V2_NONE]                       = "Vulnerable",
1422         [SPECTRE_V2_RETPOLINE]                  = "Mitigation: Retpolines",
1423         [SPECTRE_V2_LFENCE]                     = "Mitigation: LFENCE",
1424         [SPECTRE_V2_EIBRS]                      = "Mitigation: Enhanced / Automatic IBRS",
1425         [SPECTRE_V2_EIBRS_LFENCE]               = "Mitigation: Enhanced / Automatic IBRS + LFENCE",
1426         [SPECTRE_V2_EIBRS_RETPOLINE]            = "Mitigation: Enhanced / Automatic IBRS + Retpolines",
1427         [SPECTRE_V2_IBRS]                       = "Mitigation: IBRS",
1428 };
1429
1430 static const struct {
1431         const char *option;
1432         enum spectre_v2_mitigation_cmd cmd;
1433         bool secure;
1434 } mitigation_options[] __initconst = {
1435         { "off",                SPECTRE_V2_CMD_NONE,              false },
1436         { "on",                 SPECTRE_V2_CMD_FORCE,             true  },
1437         { "retpoline",          SPECTRE_V2_CMD_RETPOLINE,         false },
1438         { "retpoline,amd",      SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
1439         { "retpoline,lfence",   SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
1440         { "retpoline,generic",  SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
1441         { "eibrs",              SPECTRE_V2_CMD_EIBRS,             false },
1442         { "eibrs,lfence",       SPECTRE_V2_CMD_EIBRS_LFENCE,      false },
1443         { "eibrs,retpoline",    SPECTRE_V2_CMD_EIBRS_RETPOLINE,   false },
1444         { "auto",               SPECTRE_V2_CMD_AUTO,              false },
1445         { "ibrs",               SPECTRE_V2_CMD_IBRS,              false },
1446 };
1447
1448 static void __init spec_v2_print_cond(const char *reason, bool secure)
1449 {
1450         if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
1451                 pr_info("%s selected on command line.\n", reason);
1452 }
1453
1454 static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
1455 {
1456         enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
1457         char arg[20];
1458         int ret, i;
1459
1460         if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
1461             cpu_mitigations_off())
1462                 return SPECTRE_V2_CMD_NONE;
1463
1464         ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
1465         if (ret < 0)
1466                 return SPECTRE_V2_CMD_AUTO;
1467
1468         for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
1469                 if (!match_option(arg, ret, mitigation_options[i].option))
1470                         continue;
1471                 cmd = mitigation_options[i].cmd;
1472                 break;
1473         }
1474
1475         if (i >= ARRAY_SIZE(mitigation_options)) {
1476                 pr_err("unknown option (%s). Switching to AUTO select\n", arg);
1477                 return SPECTRE_V2_CMD_AUTO;
1478         }
1479
1480         if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
1481              cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
1482              cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
1483              cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
1484              cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
1485             !IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) {
1486                 pr_err("%s selected but not compiled in. Switching to AUTO select\n",
1487                        mitigation_options[i].option);
1488                 return SPECTRE_V2_CMD_AUTO;
1489         }
1490
1491         if ((cmd == SPECTRE_V2_CMD_EIBRS ||
1492              cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
1493              cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
1494             !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
1495                 pr_err("%s selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n",
1496                        mitigation_options[i].option);
1497                 return SPECTRE_V2_CMD_AUTO;
1498         }
1499
1500         if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
1501              cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
1502             !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
1503                 pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n",
1504                        mitigation_options[i].option);
1505                 return SPECTRE_V2_CMD_AUTO;
1506         }
1507
1508         if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY)) {
1509                 pr_err("%s selected but not compiled in. Switching to AUTO select\n",
1510                        mitigation_options[i].option);
1511                 return SPECTRE_V2_CMD_AUTO;
1512         }
1513
1514         if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
1515                 pr_err("%s selected but not Intel CPU. Switching to AUTO select\n",
1516                        mitigation_options[i].option);
1517                 return SPECTRE_V2_CMD_AUTO;
1518         }
1519
1520         if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) {
1521                 pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n",
1522                        mitigation_options[i].option);
1523                 return SPECTRE_V2_CMD_AUTO;
1524         }
1525
1526         if (cmd == SPECTRE_V2_CMD_IBRS && cpu_feature_enabled(X86_FEATURE_XENPV)) {
1527                 pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n",
1528                        mitigation_options[i].option);
1529                 return SPECTRE_V2_CMD_AUTO;
1530         }
1531
1532         spec_v2_print_cond(mitigation_options[i].option,
1533                            mitigation_options[i].secure);
1534         return cmd;
1535 }
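
/*
 * For example, given the option strings and checks above:
 *
 *   spectre_v2=eibrs,retpoline - honoured only when the CPU advertises
 *                                X86_FEATURE_IBRS_ENHANCED and the kernel
 *                                was built with CONFIG_MITIGATION_RETPOLINE;
 *                                otherwise an error is logged and the
 *                                result falls back to SPECTRE_V2_CMD_AUTO.
 *
 *   spectre_v2=ibrs            - falls back to AUTO on non-Intel CPUs, when
 *                                running as a Xen PV guest, when the CPU
 *                                lacks X86_FEATURE_IBRS, or when
 *                                CONFIG_MITIGATION_IBRS_ENTRY is not set.
 */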
1536
1537 static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
1538 {
1539         if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) {
1540                 pr_err("Kernel not compiled with retpoline; no mitigation available!");
1541                 return SPECTRE_V2_NONE;
1542         }
1543
1544         return SPECTRE_V2_RETPOLINE;
1545 }
1546
1547 /* Disable in-kernel use of non-RSB RET predictors */
1548 static void __init spec_ctrl_disable_kernel_rrsba(void)
1549 {
1550         u64 ia32_cap;
1551
1552         if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
1553                 return;
1554
1555         ia32_cap = x86_read_arch_cap_msr();
1556
1557         if (ia32_cap & ARCH_CAP_RRSBA) {
1558                 x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
1559                 update_spec_ctrl(x86_spec_ctrl_base);
1560         }
1561 }
1562
1563 static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
1564 {
1565         /*
1566          * Similar to context switches, there are two types of RSB attacks
1567          * after VM exit:
1568          *
1569          * 1) RSB underflow
1570          *
1571          * 2) Poisoned RSB entry
1572          *
1573          * When retpoline is enabled, both are mitigated by filling/clearing
1574          * the RSB.
1575          *
1576          * When IBRS is enabled, while #1 would be mitigated by the IBRS branch
1577          * prediction isolation protections, RSB still needs to be cleared
1578          * because of #2.  Note that SMEP provides no protection here, unlike
1579          * user-space-poisoned RSB entries.
1580          *
1581          * eIBRS should protect against RSB poisoning, but if the EIBRS_PBRSB
1582          * bug is present then a LITE version of RSB protection is required:
1583          * just a single CALL needs to retire before a RET is executed.
1584          */
1585         switch (mode) {
1586         case SPECTRE_V2_NONE:
1587                 return;
1588
1589         case SPECTRE_V2_EIBRS_LFENCE:
1590         case SPECTRE_V2_EIBRS:
1591                 if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
1592                         setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
1593                         pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
1594                 }
1595                 return;
1596
1597         case SPECTRE_V2_EIBRS_RETPOLINE:
1598         case SPECTRE_V2_RETPOLINE:
1599         case SPECTRE_V2_LFENCE:
1600         case SPECTRE_V2_IBRS:
1601                 setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
1602                 pr_info("Spectre v2 / SpectreRSB: Filling RSB on VMEXIT\n");
1603                 return;
1604         }
1605
1606         pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit");
1607         dump_stack();
1608 }
1609
1610 static void __init spectre_v2_select_mitigation(void)
1611 {
1612         enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
1613         enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;
1614
1615         /*
1616          * If the CPU is not affected and the command line mode is NONE or
1617          * AUTO, there is nothing to do.
1618          */
1619         if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
1620             (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
1621                 return;
1622
1623         switch (cmd) {
1624         case SPECTRE_V2_CMD_NONE:
1625                 return;
1626
1627         case SPECTRE_V2_CMD_FORCE:
1628         case SPECTRE_V2_CMD_AUTO:
1629                 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
1630                         mode = SPECTRE_V2_EIBRS;
1631                         break;
1632                 }
1633
1634                 if (IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY) &&
1635                     boot_cpu_has_bug(X86_BUG_RETBLEED) &&
1636                     retbleed_cmd != RETBLEED_CMD_OFF &&
1637                     retbleed_cmd != RETBLEED_CMD_STUFF &&
1638                     boot_cpu_has(X86_FEATURE_IBRS) &&
1639                     boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
1640                         mode = SPECTRE_V2_IBRS;
1641                         break;
1642                 }
1643
1644                 mode = spectre_v2_select_retpoline();
1645                 break;
1646
1647         case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
1648                 pr_err(SPECTRE_V2_LFENCE_MSG);
1649                 mode = SPECTRE_V2_LFENCE;
1650                 break;
1651
1652         case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
1653                 mode = SPECTRE_V2_RETPOLINE;
1654                 break;
1655
1656         case SPECTRE_V2_CMD_RETPOLINE:
1657                 mode = spectre_v2_select_retpoline();
1658                 break;
1659
1660         case SPECTRE_V2_CMD_IBRS:
1661                 mode = SPECTRE_V2_IBRS;
1662                 break;
1663
1664         case SPECTRE_V2_CMD_EIBRS:
1665                 mode = SPECTRE_V2_EIBRS;
1666                 break;
1667
1668         case SPECTRE_V2_CMD_EIBRS_LFENCE:
1669                 mode = SPECTRE_V2_EIBRS_LFENCE;
1670                 break;
1671
1672         case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
1673                 mode = SPECTRE_V2_EIBRS_RETPOLINE;
1674                 break;
1675         }
1676
1677         if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
1678                 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
1679
1680         if (spectre_v2_in_ibrs_mode(mode)) {
1681                 if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) {
1682                         msr_set_bit(MSR_EFER, _EFER_AUTOIBRS);
1683                 } else {
1684                         x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
1685                         update_spec_ctrl(x86_spec_ctrl_base);
1686                 }
1687         }
1688
1689         switch (mode) {
1690         case SPECTRE_V2_NONE:
1691         case SPECTRE_V2_EIBRS:
1692                 break;
1693
1694         case SPECTRE_V2_IBRS:
1695                 setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
1696                 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
1697                         pr_warn(SPECTRE_V2_IBRS_PERF_MSG);
1698                 break;
1699
1700         case SPECTRE_V2_LFENCE:
1701         case SPECTRE_V2_EIBRS_LFENCE:
1702                 setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
1703                 fallthrough;
1704
1705         case SPECTRE_V2_RETPOLINE:
1706         case SPECTRE_V2_EIBRS_RETPOLINE:
1707                 setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
1708                 break;
1709         }
1710
1711         /*
1712          * Disable alternate RSB predictions in kernel when indirect CALLs and
1713          * JMPs get protection against BHI and Intramode-BTI, but RET
1714          * prediction from a non-RSB predictor is still a risk.
1715          */
1716         if (mode == SPECTRE_V2_EIBRS_LFENCE ||
1717             mode == SPECTRE_V2_EIBRS_RETPOLINE ||
1718             mode == SPECTRE_V2_RETPOLINE)
1719                 spec_ctrl_disable_kernel_rrsba();
1720
1721         spectre_v2_enabled = mode;
1722         pr_info("%s\n", spectre_v2_strings[mode]);
1723
1724         /*
1725          * If Spectre v2 protection has been enabled, fill the RSB during a
1726          * context switch.  In general there are two types of RSB attacks
1727          * across context switches, for which the CALLs/RETs may be unbalanced.
1728          *
1729          * 1) RSB underflow
1730          *
1731          *    Some Intel parts have "bottomless RSB".  When the RSB is empty,
1732          *    speculated return targets may come from the branch predictor,
1733          *    which could have a user-poisoned BTB or BHB entry.
1734          *
1735          *    AMD has it even worse: *all* returns are speculated from the BTB,
1736          *    regardless of the state of the RSB.
1737          *
1738          *    When IBRS or eIBRS is enabled, the "user -> kernel" attack
1739          *    scenario is mitigated by the IBRS branch prediction isolation
1740          *    properties, so the RSB buffer filling wouldn't be necessary to
1741          *    protect against this type of attack.
1742          *
1743          *    The "user -> user" attack scenario is mitigated by RSB filling.
1744          *
1745          * 2) Poisoned RSB entry
1746          *
1747          *    If the 'next' in-kernel return stack is shorter than 'prev',
1748          *    'next' could be tricked into speculating with a user-poisoned RSB
1749          *    entry.
1750          *
1751          *    The "user -> kernel" attack scenario is mitigated by SMEP and
1752          *    eIBRS.
1753          *
1754          *    The "user -> user" scenario, also known as SpectreBHB, requires
1755          *    RSB clearing.
1756          *
1757          * So to mitigate all cases, unconditionally fill RSB on context
1758          * switches.
1759          *
1760          * FIXME: Is this pointless for retbleed-affected AMD?
1761          */
1762         setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
1763         pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
1764
1765         spectre_v2_determine_rsb_fill_type_at_vmexit(mode);
1766
1767         /*
1768          * Retpoline protects the kernel, but doesn't protect firmware.  IBRS
1769          * and Enhanced IBRS protect firmware too, so enable IBRS around
1770          * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't
1771          * otherwise enabled.
1772          *
1773          * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
1774          * the user might select retpoline on the kernel command line and, if
1775          * the CPU supports Enhanced IBRS, the kernel might unintentionally not
1776          * enable IBRS around firmware calls.
1777          */
1778         if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
1779             boot_cpu_has(X86_FEATURE_IBPB) &&
1780             (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
1781              boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) {
1782
1783                 if (retbleed_cmd != RETBLEED_CMD_IBPB) {
1784                         setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW);
1785                         pr_info("Enabling Speculation Barrier for firmware calls\n");
1786                 }
1787
1788         } else if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
1789                 setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
1790                 pr_info("Enabling Restricted Speculation for firmware calls\n");
1791         }
1792
1793         /* Set up IBPB and STIBP depending on the general spectre V2 command */
1794         spectre_v2_cmd = cmd;
1795 }
1796
1797 static void update_stibp_msr(void * __unused)
1798 {
1799         u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
1800         update_spec_ctrl(val);
1801 }
1802
1803 /* Update x86_spec_ctrl_base in case SMT state changed. */
1804 static void update_stibp_strict(void)
1805 {
1806         u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
1807
1808         if (sched_smt_active())
1809                 mask |= SPEC_CTRL_STIBP;
1810
1811         if (mask == x86_spec_ctrl_base)
1812                 return;
1813
1814         pr_info("Update user space SMT mitigation: STIBP %s\n",
1815                 mask & SPEC_CTRL_STIBP ? "always-on" : "off");
1816         x86_spec_ctrl_base = mask;
1817         on_each_cpu(update_stibp_msr, NULL, 1);
1818 }
1819
1820 /* Update the static key controlling the evaluation of TIF_SPEC_IB */
1821 static void update_indir_branch_cond(void)
1822 {
1823         if (sched_smt_active())
1824                 static_branch_enable(&switch_to_cond_stibp);
1825         else
1826                 static_branch_disable(&switch_to_cond_stibp);
1827 }
1828
1829 #undef pr_fmt
1830 #define pr_fmt(fmt) fmt
1831
1832 /* Update the static key controlling the MDS CPU buffer clear in idle */
1833 static void update_mds_branch_idle(void)
1834 {
1835         u64 ia32_cap = x86_read_arch_cap_msr();
1836
1837         /*
1838          * Enable the idle clearing if SMT is active on CPUs which are
1839          * affected only by MSBDS and not any other MDS variant.
1840          *
1841          * The other variants cannot be mitigated when SMT is enabled, so
1842          * clearing the buffers on idle just to prevent the Store Buffer
1843          * repartitioning leak would be a window dressing exercise.
1844          */
1845         if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
1846                 return;
1847
1848         if (sched_smt_active()) {
1849                 static_branch_enable(&mds_idle_clear);
1850         } else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
1851                    (ia32_cap & ARCH_CAP_FBSDP_NO)) {
1852                 static_branch_disable(&mds_idle_clear);
1853         }
1854 }
1855
1856 #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
1857 #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
1858 #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
1859
1860 void cpu_bugs_smt_update(void)
1861 {
1862         mutex_lock(&spec_ctrl_mutex);
1863
1864         if (sched_smt_active() && unprivileged_ebpf_enabled() &&
1865             spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
1866                 pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
1867
1868         switch (spectre_v2_user_stibp) {
1869         case SPECTRE_V2_USER_NONE:
1870                 break;
1871         case SPECTRE_V2_USER_STRICT:
1872         case SPECTRE_V2_USER_STRICT_PREFERRED:
1873                 update_stibp_strict();
1874                 break;
1875         case SPECTRE_V2_USER_PRCTL:
1876         case SPECTRE_V2_USER_SECCOMP:
1877                 update_indir_branch_cond();
1878                 break;
1879         }
1880
1881         switch (mds_mitigation) {
1882         case MDS_MITIGATION_FULL:
1883         case MDS_MITIGATION_VMWERV:
1884                 if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
1885                         pr_warn_once(MDS_MSG_SMT);
1886                 update_mds_branch_idle();
1887                 break;
1888         case MDS_MITIGATION_OFF:
1889                 break;
1890         }
1891
1892         switch (taa_mitigation) {
1893         case TAA_MITIGATION_VERW:
1894         case TAA_MITIGATION_UCODE_NEEDED:
1895                 if (sched_smt_active())
1896                         pr_warn_once(TAA_MSG_SMT);
1897                 break;
1898         case TAA_MITIGATION_TSX_DISABLED:
1899         case TAA_MITIGATION_OFF:
1900                 break;
1901         }
1902
1903         switch (mmio_mitigation) {
1904         case MMIO_MITIGATION_VERW:
1905         case MMIO_MITIGATION_UCODE_NEEDED:
1906                 if (sched_smt_active())
1907                         pr_warn_once(MMIO_MSG_SMT);
1908                 break;
1909         case MMIO_MITIGATION_OFF:
1910                 break;
1911         }
1912
1913         mutex_unlock(&spec_ctrl_mutex);
1914 }
1915
1916 #undef pr_fmt
1917 #define pr_fmt(fmt)     "Speculative Store Bypass: " fmt
1918
1919 static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;
1920
1921 /* The kernel command line selection */
1922 enum ssb_mitigation_cmd {
1923         SPEC_STORE_BYPASS_CMD_NONE,
1924         SPEC_STORE_BYPASS_CMD_AUTO,
1925         SPEC_STORE_BYPASS_CMD_ON,
1926         SPEC_STORE_BYPASS_CMD_PRCTL,
1927         SPEC_STORE_BYPASS_CMD_SECCOMP,
1928 };
1929
1930 static const char * const ssb_strings[] = {
1931         [SPEC_STORE_BYPASS_NONE]        = "Vulnerable",
1932         [SPEC_STORE_BYPASS_DISABLE]     = "Mitigation: Speculative Store Bypass disabled",
1933         [SPEC_STORE_BYPASS_PRCTL]       = "Mitigation: Speculative Store Bypass disabled via prctl",
1934         [SPEC_STORE_BYPASS_SECCOMP]     = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
1935 };
1936
1937 static const struct {
1938         const char *option;
1939         enum ssb_mitigation_cmd cmd;
1940 } ssb_mitigation_options[]  __initconst = {
1941         { "auto",       SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
1942         { "on",         SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
1943         { "off",        SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
1944         { "prctl",      SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
1945         { "seccomp",    SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
1946 };
1947
1948 static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
1949 {
1950         enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
1951         char arg[20];
1952         int ret, i;
1953
1954         if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
1955             cpu_mitigations_off()) {
1956                 return SPEC_STORE_BYPASS_CMD_NONE;
1957         } else {
1958                 ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
1959                                           arg, sizeof(arg));
1960                 if (ret < 0)
1961                         return SPEC_STORE_BYPASS_CMD_AUTO;
1962
1963                 for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
1964                         if (!match_option(arg, ret, ssb_mitigation_options[i].option))
1965                                 continue;
1966
1967                         cmd = ssb_mitigation_options[i].cmd;
1968                         break;
1969                 }
1970
1971                 if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
1972                         pr_err("unknown option (%s). Switching to AUTO select\n", arg);
1973                         return SPEC_STORE_BYPASS_CMD_AUTO;
1974                 }
1975         }
1976
1977         return cmd;
1978 }
1979
1980 static enum ssb_mitigation __init __ssb_select_mitigation(void)
1981 {
1982         enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
1983         enum ssb_mitigation_cmd cmd;
1984
1985         if (!boot_cpu_has(X86_FEATURE_SSBD))
1986                 return mode;
1987
1988         cmd = ssb_parse_cmdline();
1989         if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
1990             (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
1991              cmd == SPEC_STORE_BYPASS_CMD_AUTO))
1992                 return mode;
1993
1994         switch (cmd) {
1995         case SPEC_STORE_BYPASS_CMD_SECCOMP:
1996                 /*
1997                  * Choose prctl+seccomp as the default mode if seccomp is
1998                  * enabled.
1999                  */
2000                 if (IS_ENABLED(CONFIG_SECCOMP))
2001                         mode = SPEC_STORE_BYPASS_SECCOMP;
2002                 else
2003                         mode = SPEC_STORE_BYPASS_PRCTL;
2004                 break;
2005         case SPEC_STORE_BYPASS_CMD_ON:
2006                 mode = SPEC_STORE_BYPASS_DISABLE;
2007                 break;
2008         case SPEC_STORE_BYPASS_CMD_AUTO:
2009         case SPEC_STORE_BYPASS_CMD_PRCTL:
2010                 mode = SPEC_STORE_BYPASS_PRCTL;
2011                 break;
2012         case SPEC_STORE_BYPASS_CMD_NONE:
2013                 break;
2014         }
2015
2016         /*
2017          * We have three CPU feature flags that are in play here:
2018          *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
2019          *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
2020          *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
2021          */
2022         if (mode == SPEC_STORE_BYPASS_DISABLE) {
2023                 setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
2024                 /*
2025                  * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
2026                  * use a completely different MSR and bit dependent on family.
2027                  */
2028                 if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
2029                     !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
2030                         x86_amd_ssb_disable();
2031                 } else {
2032                         x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
2033                         update_spec_ctrl(x86_spec_ctrl_base);
2034                 }
2035         }
2036
2037         return mode;
2038 }
2039
2040 static void ssb_select_mitigation(void)
2041 {
2042         ssb_mode = __ssb_select_mitigation();
2043
2044         if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
2045                 pr_info("%s\n", ssb_strings[ssb_mode]);
2046 }
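
/*
 * For example, booting an affected, SSBD-capable CPU with
 * "spec_store_bypass_disable=on" makes __ssb_select_mitigation() return
 * SPEC_STORE_BYPASS_DISABLE: SPEC_CTRL_SSBD is set in x86_spec_ctrl_base
 * (or x86_amd_ssb_disable() is used when neither SPEC_CTRL_SSBD nor
 * AMD_SSBD is available), and
 * /sys/devices/system/cpu/vulnerabilities/spec_store_bypass then reads
 * "Mitigation: Speculative Store Bypass disabled".
 */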
2047
2048 #undef pr_fmt
2049 #define pr_fmt(fmt)     "Speculation prctl: " fmt
2050
2051 static void task_update_spec_tif(struct task_struct *tsk)
2052 {
2053         /* Force the update of the real TIF bits */
2054         set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);
2055
2056         /*
2057          * Immediately update the speculation control MSRs for the current
2058          * task, but for a non-current task delay setting the CPU
2059          * mitigation until it is scheduled next.
2060          *
2061          * This can only happen for SECCOMP mitigation. For PRCTL it's
2062          * always the current task.
2063          */
2064         if (tsk == current)
2065                 speculation_ctrl_update_current();
2066 }
2067
2068 static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl)
2069 {
2070
2071         if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
2072                 return -EPERM;
2073
2074         switch (ctrl) {
2075         case PR_SPEC_ENABLE:
2076                 set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
2077                 return 0;
2078         case PR_SPEC_DISABLE:
2079                 clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH);
2080                 return 0;
2081         default:
2082                 return -ERANGE;
2083         }
2084 }
2085
2086 static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
2087 {
2088         if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
2089             ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
2090                 return -ENXIO;
2091
2092         switch (ctrl) {
2093         case PR_SPEC_ENABLE:
2094                 /* If speculation is force disabled, enable is not allowed */
2095                 if (task_spec_ssb_force_disable(task))
2096                         return -EPERM;
2097                 task_clear_spec_ssb_disable(task);
2098                 task_clear_spec_ssb_noexec(task);
2099                 task_update_spec_tif(task);
2100                 break;
2101         case PR_SPEC_DISABLE:
2102                 task_set_spec_ssb_disable(task);
2103                 task_clear_spec_ssb_noexec(task);
2104                 task_update_spec_tif(task);
2105                 break;
2106         case PR_SPEC_FORCE_DISABLE:
2107                 task_set_spec_ssb_disable(task);
2108                 task_set_spec_ssb_force_disable(task);
2109                 task_clear_spec_ssb_noexec(task);
2110                 task_update_spec_tif(task);
2111                 break;
2112         case PR_SPEC_DISABLE_NOEXEC:
2113                 if (task_spec_ssb_force_disable(task))
2114                         return -EPERM;
2115                 task_set_spec_ssb_disable(task);
2116                 task_set_spec_ssb_noexec(task);
2117                 task_update_spec_tif(task);
2118                 break;
2119         default:
2120                 return -ERANGE;
2121         }
2122         return 0;
2123 }
2124
2125 static bool is_spec_ib_user_controlled(void)
2126 {
2127         return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
2128                 spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
2129                 spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
2130                 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
2131 }
2132
2133 static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
2134 {
2135         switch (ctrl) {
2136         case PR_SPEC_ENABLE:
2137                 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2138                     spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2139                         return 0;
2140
2141                 /*
2142                  * With strict mode for both IBPB and STIBP, the instruction
2143                  * code paths avoid checking this task flag and instead,
2144                  * unconditionally run the instruction. However, STIBP and IBPB
2145                  * are independent and either can be set to conditionally
2146                  * enabled regardless of the mode of the other.
2147                  *
2148                  * If either is set to conditional, allow the task flag to be
2149                  * updated, unless it was force-disabled by a previous prctl
2150                  * call. Currently, this is possible on an AMD CPU which has the
2151                  * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
2152                  * kernel is booted with 'spectre_v2_user=seccomp', then
2153                  * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
2154                  * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
2155                  */
2156                 if (!is_spec_ib_user_controlled() ||
2157                     task_spec_ib_force_disable(task))
2158                         return -EPERM;
2159
2160                 task_clear_spec_ib_disable(task);
2161                 task_update_spec_tif(task);
2162                 break;
2163         case PR_SPEC_DISABLE:
2164         case PR_SPEC_FORCE_DISABLE:
2165                 /*
2166                  * Indirect branch speculation is always allowed when
2167                  * mitigation is force disabled.
2168                  */
2169                 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2170                     spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2171                         return -EPERM;
2172
2173                 if (!is_spec_ib_user_controlled())
2174                         return 0;
2175
2176                 task_set_spec_ib_disable(task);
2177                 if (ctrl == PR_SPEC_FORCE_DISABLE)
2178                         task_set_spec_ib_force_disable(task);
2179                 task_update_spec_tif(task);
2180                 if (task == current)
2181                         indirect_branch_prediction_barrier();
2182                 break;
2183         default:
2184                 return -ERANGE;
2185         }
2186         return 0;
2187 }
2188
2189 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
2190                              unsigned long ctrl)
2191 {
2192         switch (which) {
2193         case PR_SPEC_STORE_BYPASS:
2194                 return ssb_prctl_set(task, ctrl);
2195         case PR_SPEC_INDIRECT_BRANCH:
2196                 return ib_prctl_set(task, ctrl);
2197         case PR_SPEC_L1D_FLUSH:
2198                 return l1d_flush_prctl_set(task, ctrl);
2199         default:
2200                 return -ENODEV;
2201         }
2202 }
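
/*
 * Illustrative userspace sketch (not built as part of this file) of the
 * prctl() interface dispatched above, using the PR_SPEC_* constants from
 * <linux/prctl.h>.  Disabling Speculative Store Bypass for the calling
 * task only succeeds when ssb_mode is the prctl or seccomp flavour;
 * otherwise ssb_prctl_set() returns -ENXIO.
 *
 *	#include <stdio.h>
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	int main(void)
 *	{
 *		// which = PR_SPEC_STORE_BYPASS, ctrl = PR_SPEC_DISABLE
 *		if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *			  PR_SPEC_DISABLE, 0, 0))
 *			perror("PR_SET_SPECULATION_CTRL");
 *		return 0;
 *	}
 */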
2203
2204 #ifdef CONFIG_SECCOMP
2205 void arch_seccomp_spec_mitigate(struct task_struct *task)
2206 {
2207         if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
2208                 ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
2209         if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
2210             spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
2211                 ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
2212 }
2213 #endif
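
/*
 * Note: arch_seccomp_spec_mitigate() is called from the seccomp core when a
 * task installs a filter (unless SECCOMP_FILTER_FLAG_SPEC_ALLOW was passed),
 * so in the "seccomp" modes every filtered task is treated as if it had
 * issued prctl(PR_SET_SPECULATION_CTRL, ..., PR_SPEC_FORCE_DISABLE, 0, 0)
 * for the respective control.
 */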
2214
2215 static int l1d_flush_prctl_get(struct task_struct *task)
2216 {
2217         if (!static_branch_unlikely(&switch_mm_cond_l1d_flush))
2218                 return PR_SPEC_FORCE_DISABLE;
2219
2220         if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH))
2221                 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2222         else
2223                 return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2224 }
2225
2226 static int ssb_prctl_get(struct task_struct *task)
2227 {
2228         switch (ssb_mode) {
2229         case SPEC_STORE_BYPASS_NONE:
2230                 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
2231                         return PR_SPEC_ENABLE;
2232                 return PR_SPEC_NOT_AFFECTED;
2233         case SPEC_STORE_BYPASS_DISABLE:
2234                 return PR_SPEC_DISABLE;
2235         case SPEC_STORE_BYPASS_SECCOMP:
2236         case SPEC_STORE_BYPASS_PRCTL:
2237                 if (task_spec_ssb_force_disable(task))
2238                         return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
2239                 if (task_spec_ssb_noexec(task))
2240                         return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;
2241                 if (task_spec_ssb_disable(task))
2242                         return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2243                 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2244         }
2245         BUG();
2246 }
2247
2248 static int ib_prctl_get(struct task_struct *task)
2249 {
2250         if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
2251                 return PR_SPEC_NOT_AFFECTED;
2252
2253         if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
2254             spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
2255                 return PR_SPEC_ENABLE;
2256         else if (is_spec_ib_user_controlled()) {
2257                 if (task_spec_ib_force_disable(task))
2258                         return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
2259                 if (task_spec_ib_disable(task))
2260                         return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
2261                 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
2262         } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
2263             spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
2264             spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
2265                 return PR_SPEC_DISABLE;
2266         else
2267                 return PR_SPEC_NOT_AFFECTED;
2268 }
2269
2270 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
2271 {
2272         switch (which) {
2273         case PR_SPEC_STORE_BYPASS:
2274                 return ssb_prctl_get(task);
2275         case PR_SPEC_INDIRECT_BRANCH:
2276                 return ib_prctl_get(task);
2277         case PR_SPEC_L1D_FLUSH:
2278                 return l1d_flush_prctl_get(task);
2279         default:
2280                 return -ENODEV;
2281         }
2282 }
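
/*
 * Illustrative userspace sketch (not built as part of this file) that
 * coarsely decodes the bitmask returned for PR_SPEC_INDIRECT_BRANCH; see
 * ib_prctl_get() above for the exact combinations the kernel reports.
 *
 *	#include <stdio.h>
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	int main(void)
 *	{
 *		int ret = prctl(PR_GET_SPECULATION_CTRL,
 *				PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);
 *
 *		if (ret < 0)
 *			return 1;
 *		printf("prctl control: %s, speculation: %s\n",
 *		       (ret & PR_SPEC_PRCTL) ? "available" : "unavailable",
 *		       (ret & (PR_SPEC_DISABLE | PR_SPEC_FORCE_DISABLE |
 *			       PR_SPEC_DISABLE_NOEXEC)) ? "disabled" : "enabled");
 *		return 0;
 *	}
 */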
2283
2284 void x86_spec_ctrl_setup_ap(void)
2285 {
2286         if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
2287                 update_spec_ctrl(x86_spec_ctrl_base);
2288
2289         if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
2290                 x86_amd_ssb_disable();
2291 }
2292
2293 bool itlb_multihit_kvm_mitigation;
2294 EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);
2295
2296 #undef pr_fmt
2297 #define pr_fmt(fmt)     "L1TF: " fmt
2298
2299 /* Default mitigation for L1TF-affected CPUs */
2300 enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
2301 #if IS_ENABLED(CONFIG_KVM_INTEL)
2302 EXPORT_SYMBOL_GPL(l1tf_mitigation);
2303 #endif
2304 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
2305 EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
2306
2307 /*
2308  * These CPUs all support a 44-bit physical address space internally in the
2309  * cache, but CPUID can report a smaller number of physical address bits.
2310  *
2311  * The L1TF mitigation uses the topmost address bit for the inversion of
2312  * non-present PTEs. When the installed memory reaches into the topmost
2313  * address bit due to memory holes, which has been observed on machines
2314  * that report 36 physical address bits and have 32G of RAM installed,
2315  * the mitigation range check in l1tf_select_mitigation() triggers.
2316  * This is a false positive because the mitigation is still possible, as
2317  * the cache uses 44 bits internally. Use the cache bits instead of the
2318  * reported physical bits and adjust them on the affected machines to
2319  * 44 bits if the reported bits are less than 44.
2320  */
2321 static void override_cache_bits(struct cpuinfo_x86 *c)
2322 {
2323         if (c->x86 != 6)
2324                 return;
2325
2326         switch (c->x86_model) {
2327         case INTEL_FAM6_NEHALEM:
2328         case INTEL_FAM6_WESTMERE:
2329         case INTEL_FAM6_SANDYBRIDGE:
2330         case INTEL_FAM6_IVYBRIDGE:
2331         case INTEL_FAM6_HASWELL:
2332         case INTEL_FAM6_HASWELL_L:
2333         case INTEL_FAM6_HASWELL_G:
2334         case INTEL_FAM6_BROADWELL:
2335         case INTEL_FAM6_BROADWELL_G:
2336         case INTEL_FAM6_SKYLAKE_L:
2337         case INTEL_FAM6_SKYLAKE:
2338         case INTEL_FAM6_KABYLAKE_L:
2339         case INTEL_FAM6_KABYLAKE:
2340                 if (c->x86_cache_bits < 44)
2341                         c->x86_cache_bits = 44;
2342                 break;
2343         }
2344 }
2345
2346 static void __init l1tf_select_mitigation(void)
2347 {
2348         u64 half_pa;
2349
2350         if (!boot_cpu_has_bug(X86_BUG_L1TF))
2351                 return;
2352
2353         if (cpu_mitigations_off())
2354                 l1tf_mitigation = L1TF_MITIGATION_OFF;
2355         else if (cpu_mitigations_auto_nosmt())
2356                 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
2357
2358         override_cache_bits(&boot_cpu_data);
2359
2360         switch (l1tf_mitigation) {
2361         case L1TF_MITIGATION_OFF:
2362         case L1TF_MITIGATION_FLUSH_NOWARN:
2363         case L1TF_MITIGATION_FLUSH:
2364                 break;
2365         case L1TF_MITIGATION_FLUSH_NOSMT:
2366         case L1TF_MITIGATION_FULL:
2367                 cpu_smt_disable(false);
2368                 break;
2369         case L1TF_MITIGATION_FULL_FORCE:
2370                 cpu_smt_disable(true);
2371                 break;
2372         }
2373
2374 #if CONFIG_PGTABLE_LEVELS == 2
2375         pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
2376         return;
2377 #endif
2378
2379         half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
2380         if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
2381                         e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
2382                 pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
2383                 pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
2384                                 half_pa);
2385                 pr_info("However, doing so will make a part of your RAM unusable.\n");
2386                 pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
2387                 return;
2388         }
2389
2390         setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
2391 }
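
/*
 * Worked example, assuming l1tf_pfn_limit() evaluates to
 * 1 << (x86_cache_bits - 1 - PAGE_SHIFT) as defined elsewhere: half_pa
 * above is simply 1ULL << (x86_cache_bits - 1).  Without the override, a
 * CPU reporting 36 physical address bits would give half_pa =
 * 1ULL << 35 = 32 GiB, which memory holes on a 32G machine can reach,
 * spuriously triggering the warning.  With the 44-bit adjustment from
 * override_cache_bits(), half_pa becomes 1ULL << 43 = 8 TiB.
 */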
2392
2393 static int __init l1tf_cmdline(char *str)
2394 {
2395         if (!boot_cpu_has_bug(X86_BUG_L1TF))
2396                 return 0;
2397
2398         if (!str)
2399                 return -EINVAL;
2400
2401         if (!strcmp(str, "off"))
2402                 l1tf_mitigation = L1TF_MITIGATION_OFF;
2403         else if (!strcmp(str, "flush,nowarn"))
2404                 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
2405         else if (!strcmp(str, "flush"))
2406                 l1tf_mitigation = L1TF_MITIGATION_FLUSH;
2407         else if (!strcmp(str, "flush,nosmt"))
2408                 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
2409         else if (!strcmp(str, "full"))
2410                 l1tf_mitigation = L1TF_MITIGATION_FULL;
2411         else if (!strcmp(str, "full,force"))
2412                 l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;
2413
2414         return 0;
2415 }
2416 early_param("l1tf", l1tf_cmdline);
2417
2418 #undef pr_fmt
2419 #define pr_fmt(fmt)     "Speculative Return Stack Overflow: " fmt
2420
2421 enum srso_mitigation {
2422         SRSO_MITIGATION_NONE,
2423         SRSO_MITIGATION_UCODE_NEEDED,
2424         SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED,
2425         SRSO_MITIGATION_MICROCODE,
2426         SRSO_MITIGATION_SAFE_RET,
2427         SRSO_MITIGATION_IBPB,
2428         SRSO_MITIGATION_IBPB_ON_VMEXIT,
2429 };
2430
2431 enum srso_mitigation_cmd {
2432         SRSO_CMD_OFF,
2433         SRSO_CMD_MICROCODE,
2434         SRSO_CMD_SAFE_RET,
2435         SRSO_CMD_IBPB,
2436         SRSO_CMD_IBPB_ON_VMEXIT,
2437 };
2438
2439 static const char * const srso_strings[] = {
2440         [SRSO_MITIGATION_NONE]                  = "Vulnerable",
2441         [SRSO_MITIGATION_UCODE_NEEDED]          = "Vulnerable: No microcode",
2442         [SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED] = "Vulnerable: Safe RET, no microcode",
2443         [SRSO_MITIGATION_MICROCODE]             = "Vulnerable: Microcode, no safe RET",
2444         [SRSO_MITIGATION_SAFE_RET]              = "Mitigation: Safe RET",
2445         [SRSO_MITIGATION_IBPB]                  = "Mitigation: IBPB",
2446         [SRSO_MITIGATION_IBPB_ON_VMEXIT]        = "Mitigation: IBPB on VMEXIT only"
2447 };
2448
2449 static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE;
2450 static enum srso_mitigation_cmd srso_cmd __ro_after_init = SRSO_CMD_SAFE_RET;
2451
2452 static int __init srso_parse_cmdline(char *str)
2453 {
2454         if (!str)
2455                 return -EINVAL;
2456
2457         if (!strcmp(str, "off"))
2458                 srso_cmd = SRSO_CMD_OFF;
2459         else if (!strcmp(str, "microcode"))
2460                 srso_cmd = SRSO_CMD_MICROCODE;
2461         else if (!strcmp(str, "safe-ret"))
2462                 srso_cmd = SRSO_CMD_SAFE_RET;
2463         else if (!strcmp(str, "ibpb"))
2464                 srso_cmd = SRSO_CMD_IBPB;
2465         else if (!strcmp(str, "ibpb-vmexit"))
2466                 srso_cmd = SRSO_CMD_IBPB_ON_VMEXIT;
2467         else
2468                 pr_err("Ignoring unknown SRSO option (%s).", str);
2469
2470         return 0;
2471 }
2472 early_param("spec_rstack_overflow", srso_parse_cmdline);
2473
2474 #define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options."
2475
2476 static void __init srso_select_mitigation(void)
2477 {
2478         bool has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE);
2479
2480         if (cpu_mitigations_off())
2481                 return;
2482
2483         if (!boot_cpu_has_bug(X86_BUG_SRSO)) {
2484                 if (boot_cpu_has(X86_FEATURE_SBPB))
2485                         x86_pred_cmd = PRED_CMD_SBPB;
2486                 return;
2487         }
2488
2489         if (has_microcode) {
2490                 /*
2491                  * Zen1/2 with SMT off aren't vulnerable after the right
2492                  * IBPB microcode has been applied.
2493                  *
2494                  * Zen1/2 don't have SBPB, no need to try to enable it here.
2495                  */
2496                 if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
2497                         setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
2498                         return;
2499                 }
2500
2501                 if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
2502                         srso_mitigation = SRSO_MITIGATION_IBPB;
2503                         goto out;
2504                 }
2505         } else {
2506                 pr_warn("IBPB-extending microcode not applied!\n");
2507                 pr_warn(SRSO_NOTICE);
2508
2509                 /* may be overwritten by SRSO_CMD_SAFE_RET below */
2510                 srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED;
2511         }
2512
2513         switch (srso_cmd) {
2514         case SRSO_CMD_OFF:
2515                 if (boot_cpu_has(X86_FEATURE_SBPB))
2516                         x86_pred_cmd = PRED_CMD_SBPB;
2517                 return;
2518
2519         case SRSO_CMD_MICROCODE:
2520                 if (has_microcode) {
2521                         srso_mitigation = SRSO_MITIGATION_MICROCODE;
2522                         pr_warn(SRSO_NOTICE);
2523                 }
2524                 break;
2525
2526         case SRSO_CMD_SAFE_RET:
2527                 if (IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
2528                         /*
2529                          * Enable the return thunk for generated code
2530                          * like ftrace, static_call, etc.
2531                          */
2532                         setup_force_cpu_cap(X86_FEATURE_RETHUNK);
2533                         setup_force_cpu_cap(X86_FEATURE_UNRET);
2534
2535                         if (boot_cpu_data.x86 == 0x19) {
2536                                 setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
2537                                 x86_return_thunk = srso_alias_return_thunk;
2538                         } else {
2539                                 setup_force_cpu_cap(X86_FEATURE_SRSO);
2540                                 x86_return_thunk = srso_return_thunk;
2541                         }
2542                         if (has_microcode)
2543                                 srso_mitigation = SRSO_MITIGATION_SAFE_RET;
2544                         else
2545                                 srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED;
2546                 } else {
2547                         pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
2548                 }
2549                 break;
2550
2551         case SRSO_CMD_IBPB:
2552                 if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
2553                         if (has_microcode) {
2554                                 setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
2555                                 srso_mitigation = SRSO_MITIGATION_IBPB;
2556                         }
2557                 } else {
2558                         pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
2559                 }
2560                 break;
2561
2562         case SRSO_CMD_IBPB_ON_VMEXIT:
2563                 if (IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
2564                         if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) {
2565                                 setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
2566                                 srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
2567                         }
2568                 } else {
2569                         pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
2570                 }
2571                 break;
2572         }
2573
2574 out:
2575         pr_info("%s\n", srso_strings[srso_mitigation]);
2576 }
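
/*
 * For example, with "spec_rstack_overflow=safe-ret" (also the default
 * srso_cmd) on an affected CPU: CONFIG_MITIGATION_SRSO is required,
 * srso_alias_return_thunk is used on family 0x19 and srso_return_thunk
 * otherwise, and sysfs reports "Mitigation: Safe RET" only when the
 * IBPB-extending microcode (X86_FEATURE_IBPB_BRTYPE) is present;
 * without it the string is "Vulnerable: Safe RET, no microcode".
 */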
2577
2578 #undef pr_fmt
2579 #define pr_fmt(fmt) fmt
2580
2581 #ifdef CONFIG_SYSFS
2582
2583 #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
2584
2585 #if IS_ENABLED(CONFIG_KVM_INTEL)
2586 static const char * const l1tf_vmx_states[] = {
2587         [VMENTER_L1D_FLUSH_AUTO]                = "auto",
2588         [VMENTER_L1D_FLUSH_NEVER]               = "vulnerable",
2589         [VMENTER_L1D_FLUSH_COND]                = "conditional cache flushes",
2590         [VMENTER_L1D_FLUSH_ALWAYS]              = "cache flushes",
2591         [VMENTER_L1D_FLUSH_EPT_DISABLED]        = "EPT disabled",
2592         [VMENTER_L1D_FLUSH_NOT_REQUIRED]        = "flush not necessary"
2593 };
2594
2595 static ssize_t l1tf_show_state(char *buf)
2596 {
2597         if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
2598                 return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
2599
2600         if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
2601             (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
2602              sched_smt_active())) {
2603                 return sysfs_emit(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
2604                                   l1tf_vmx_states[l1tf_vmx_mitigation]);
2605         }
2606
2607         return sysfs_emit(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
2608                           l1tf_vmx_states[l1tf_vmx_mitigation],
2609                           sched_smt_active() ? "vulnerable" : "disabled");
2610 }
2611
2612 static ssize_t itlb_multihit_show_state(char *buf)
2613 {
2614         if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) ||
2615             !boot_cpu_has(X86_FEATURE_VMX))
2616                 return sysfs_emit(buf, "KVM: Mitigation: VMX unsupported\n");
2617         else if (!(cr4_read_shadow() & X86_CR4_VMXE))
2618                 return sysfs_emit(buf, "KVM: Mitigation: VMX disabled\n");
2619         else if (itlb_multihit_kvm_mitigation)
2620                 return sysfs_emit(buf, "KVM: Mitigation: Split huge pages\n");
2621         else
2622                 return sysfs_emit(buf, "KVM: Vulnerable\n");
2623 }
2624 #else
2625 static ssize_t l1tf_show_state(char *buf)
2626 {
2627         return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG);
2628 }
2629
2630 static ssize_t itlb_multihit_show_state(char *buf)
2631 {
2632         return sysfs_emit(buf, "Processor vulnerable\n");
2633 }
2634 #endif
2635
2636 static ssize_t mds_show_state(char *buf)
2637 {
2638         if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
2639                 return sysfs_emit(buf, "%s; SMT Host state unknown\n",
2640                                   mds_strings[mds_mitigation]);
2641         }
2642
2643         if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
2644                 return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
2645                                   (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
2646                                    sched_smt_active() ? "mitigated" : "disabled"));
2647         }
2648
2649         return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
2650                           sched_smt_active() ? "vulnerable" : "disabled");
2651 }
2652
2653 static ssize_t tsx_async_abort_show_state(char *buf)
2654 {
2655         if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
2656             (taa_mitigation == TAA_MITIGATION_OFF))
2657                 return sysfs_emit(buf, "%s\n", taa_strings[taa_mitigation]);
2658
2659         if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
2660                 return sysfs_emit(buf, "%s; SMT Host state unknown\n",
2661                                   taa_strings[taa_mitigation]);
2662         }
2663
2664         return sysfs_emit(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
2665                           sched_smt_active() ? "vulnerable" : "disabled");
2666 }
2667
2668 static ssize_t mmio_stale_data_show_state(char *buf)
2669 {
2670         if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
2671                 return sysfs_emit(buf, "Unknown: No mitigations\n");
2672
2673         if (mmio_mitigation == MMIO_MITIGATION_OFF)
2674                 return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);
2675
2676         if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
2677                 return sysfs_emit(buf, "%s; SMT Host state unknown\n",
2678                                   mmio_strings[mmio_mitigation]);
2679         }
2680
2681         return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation],
2682                           sched_smt_active() ? "vulnerable" : "disabled");
2683 }
2684
2685 static ssize_t rfds_show_state(char *buf)
2686 {
2687         return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]);
2688 }
2689
2690 static char *stibp_state(void)
2691 {
2692         if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
2693             !boot_cpu_has(X86_FEATURE_AUTOIBRS))
2694                 return "";
2695
2696         switch (spectre_v2_user_stibp) {
2697         case SPECTRE_V2_USER_NONE:
2698                 return ", STIBP: disabled";
2699         case SPECTRE_V2_USER_STRICT:
2700                 return ", STIBP: forced";
2701         case SPECTRE_V2_USER_STRICT_PREFERRED:
2702                 return ", STIBP: always-on";
2703         case SPECTRE_V2_USER_PRCTL:
2704         case SPECTRE_V2_USER_SECCOMP:
2705                 if (static_key_enabled(&switch_to_cond_stibp))
2706                         return ", STIBP: conditional";
2707         }
2708         return "";
2709 }
2710
2711 static char *ibpb_state(void)
2712 {
2713         if (boot_cpu_has(X86_FEATURE_IBPB)) {
2714                 if (static_key_enabled(&switch_mm_always_ibpb))
2715                         return ", IBPB: always-on";
2716                 if (static_key_enabled(&switch_mm_cond_ibpb))
2717                         return ", IBPB: conditional";
2718                 return ", IBPB: disabled";
2719         }
2720         return "";
2721 }
2722
2723 static char *pbrsb_eibrs_state(void)
2724 {
2725         if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
2726                 if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) ||
2727                     boot_cpu_has(X86_FEATURE_RSB_VMEXIT))
2728                         return ", PBRSB-eIBRS: SW sequence";
2729                 else
2730                         return ", PBRSB-eIBRS: Vulnerable";
2731         } else {
2732                 return ", PBRSB-eIBRS: Not affected";
2733         }
2734 }
2735
2736 static ssize_t spectre_v2_show_state(char *buf)
2737 {
2738         if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
2739                 return sysfs_emit(buf, "Vulnerable: LFENCE\n");
2740
2741         if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
2742                 return sysfs_emit(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");
2743
2744         if (sched_smt_active() && unprivileged_ebpf_enabled() &&
2745             spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
2746                 return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
2747
2748         return sysfs_emit(buf, "%s%s%s%s%s%s%s\n",
2749                           spectre_v2_strings[spectre_v2_enabled],
2750                           ibpb_state(),
2751                           boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
2752                           stibp_state(),
2753                           boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
2754                           pbrsb_eibrs_state(),
2755                           spectre_v2_module_string());
2756 }
2757
2758 static ssize_t srbds_show_state(char *buf)
2759 {
2760         return sysfs_emit(buf, "%s\n", srbds_strings[srbds_mitigation]);
2761 }
2762
2763 static ssize_t retbleed_show_state(char *buf)
2764 {
2765         if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET ||
2766             retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
2767                 if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
2768                     boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
2769                         return sysfs_emit(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n");
2770
2771                 return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation],
2772                                   !sched_smt_active() ? "disabled" :
2773                                   spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
2774                                   spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ?
2775                                   "enabled with STIBP protection" : "vulnerable");
2776         }
2777
2778         return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
2779 }
2780
2781 static ssize_t srso_show_state(char *buf)
2782 {
2783         if (boot_cpu_has(X86_FEATURE_SRSO_NO))
2784                 return sysfs_emit(buf, "Mitigation: SMT disabled\n");
2785
2786         return sysfs_emit(buf, "%s\n", srso_strings[srso_mitigation]);
2787 }
2788
2789 static ssize_t gds_show_state(char *buf)
2790 {
2791         return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]);
2792 }
2793
2794 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
2795                                char *buf, unsigned int bug)
2796 {
2797         if (!boot_cpu_has_bug(bug))
2798                 return sysfs_emit(buf, "Not affected\n");
2799
2800         switch (bug) {
2801         case X86_BUG_CPU_MELTDOWN:
2802                 if (boot_cpu_has(X86_FEATURE_PTI))
2803                         return sysfs_emit(buf, "Mitigation: PTI\n");
2804
2805                 if (hypervisor_is_type(X86_HYPER_XEN_PV))
2806                         return sysfs_emit(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");
2807
2808                 break;
2809
2810         case X86_BUG_SPECTRE_V1:
2811                 return sysfs_emit(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
2812
2813         case X86_BUG_SPECTRE_V2:
2814                 return spectre_v2_show_state(buf);
2815
2816         case X86_BUG_SPEC_STORE_BYPASS:
2817                 return sysfs_emit(buf, "%s\n", ssb_strings[ssb_mode]);
2818
2819         case X86_BUG_L1TF:
2820                 if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
2821                         return l1tf_show_state(buf);
2822                 break;
2823
2824         case X86_BUG_MDS:
2825                 return mds_show_state(buf);
2826
2827         case X86_BUG_TAA:
2828                 return tsx_async_abort_show_state(buf);
2829
2830         case X86_BUG_ITLB_MULTIHIT:
2831                 return itlb_multihit_show_state(buf);
2832
2833         case X86_BUG_SRBDS:
2834                 return srbds_show_state(buf);
2835
2836         case X86_BUG_MMIO_STALE_DATA:
2837         case X86_BUG_MMIO_UNKNOWN:
2838                 return mmio_stale_data_show_state(buf);
2839
2840         case X86_BUG_RETBLEED:
2841                 return retbleed_show_state(buf);
2842
2843         case X86_BUG_SRSO:
2844                 return srso_show_state(buf);
2845
2846         case X86_BUG_GDS:
2847                 return gds_show_state(buf);
2848
2849         case X86_BUG_RFDS:
2850                 return rfds_show_state(buf);
2851
2852         default:
2853                 break;
2854         }
2855
2856         return sysfs_emit(buf, "Vulnerable\n");
2857 }
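/*
 * A minimal userspace sketch (guarded out with #if 0; it is not part of the
 * kernel build) showing how the strings built by cpu_show_common() are
 * consumed: each cpu_show_*() handler below backs one read-only file under
 * /sys/devices/system/cpu/vulnerabilities/.
 */
#if 0	/* userspace example only */
#include <stdio.h>

int main(void)
{
        char line[256];
        /* Any file in the vulnerabilities directory maps to one handler. */
        FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");

        if (!f)
                return 1;
        if (fgets(line, sizeof(line), f))
                fputs(line, stdout);	/* e.g. "Mitigation: ...\n" */
        fclose(f);
        return 0;
}
#endif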
2858
2859 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
2860 {
2861         return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
2862 }
2863
2864 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
2865 {
2866         return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
2867 }
2868
2869 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
2870 {
2871         return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
2872 }
2873
2874 ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
2875 {
2876         return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
2877 }
2878
2879 ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
2880 {
2881         return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
2882 }
2883
2884 ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
2885 {
2886         return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
2887 }
2888
2889 ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
2890 {
2891         return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
2892 }
2893
2894 ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
2895 {
2896         return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
2897 }
2898
2899 ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
2900 {
2901         return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
2902 }
2903
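/*
 * X86_BUG_MMIO_UNKNOWN is set instead of X86_BUG_MMIO_STALE_DATA on older
 * CPUs for which it is not known whether they are affected.  Both bug bits
 * share the same sysfs file and the same show routine, which reports an
 * "Unknown" state for the former (see mmio_stale_data_show_state() earlier
 * in this file).
 */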
2904 ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
2905 {
2906         if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
2907                 return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN);
2908         else
2909                 return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
2910 }
2911
2912 ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
2913 {
2914         return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED);
2915 }
2916
2917 ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf)
2918 {
2919         return cpu_show_common(dev, attr, buf, X86_BUG_SRSO);
2920 }
2921
2922 ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf)
2923 {
2924         return cpu_show_common(dev, attr, buf, X86_BUG_GDS);
2925 }
2926
2927 ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attribute *attr, char *buf)
2928 {
2929         return cpu_show_common(dev, attr, buf, X86_BUG_RFDS);
2930 }
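/*
 * For reference, a trimmed sketch (assumed to follow drivers/base/cpu.c and
 * not part of this file) of how the handlers above are exposed: the driver
 * core declares weak "Not affected" defaults, which the strong definitions
 * in this file override, and binds each handler to a 0444 sysfs attribute
 * in the "vulnerabilities" group:
 *
 *	static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
 *	static DEVICE_ATTR(retbleed,   0444, cpu_show_retbleed, NULL);
 *
 *	static struct attribute *cpu_root_vulnerabilities_attrs[] = {
 *		&dev_attr_spectre_v2.attr,
 *		&dev_attr_retbleed.attr,
 *		...
 *		NULL
 *	};
 */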
2931 #endif
2932
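/*
 * Reached from the return-thunk warning stub on the asm side (assumed to
 * live in arch/x86/lib/retpoline.S, which is not shown here) when the
 * default return thunk is executed at runtime, i.e. when alternatives
 * patching did not replace it as expected.
 */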
2933 void __warn_thunk(void)
2934 {
2935         WARN_ONCE(1, "Unpatched return thunk in use. This should not happen!\n");
2936 }