[linux-2.6-block.git] / arch / x86 / kernel / cpu / bugs.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  Copyright (C) 1994  Linus Torvalds
4  *
5  *  Cyrix stuff, June 1998 by:
6  *      - Rafael R. Reilova (moved everything from head.S),
7  *        <rreilova@ececs.uc.edu>
8  *      - Channing Corn (tests & fixes),
9  *      - Andrew D. Balsa (code cleanup).
10  */
11 #include <linux/init.h>
12 #include <linux/utsname.h>
13 #include <linux/cpu.h>
14 #include <linux/module.h>
15 #include <linux/nospec.h>
16 #include <linux/prctl.h>
17
18 #include <asm/spec-ctrl.h>
19 #include <asm/cmdline.h>
20 #include <asm/bugs.h>
21 #include <asm/processor.h>
22 #include <asm/processor-flags.h>
23 #include <asm/fpu/internal.h>
24 #include <asm/msr.h>
25 #include <asm/vmx.h>
26 #include <asm/paravirt.h>
27 #include <asm/alternative.h>
28 #include <asm/pgtable.h>
29 #include <asm/set_memory.h>
30 #include <asm/intel-family.h>
31 #include <asm/e820/api.h>
32 #include <asm/hypervisor.h>
33
34 static void __init spectre_v2_select_mitigation(void);
35 static void __init ssb_select_mitigation(void);
36 static void __init l1tf_select_mitigation(void);
37
38 /* The base value of the SPEC_CTRL MSR that always has to be preserved. */
39 u64 x86_spec_ctrl_base;
40 EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
41 static DEFINE_MUTEX(spec_ctrl_mutex);
42
43 /*
44  * The vendor and possibly platform specific bits which can be modified in
45  * x86_spec_ctrl_base.
46  */
47 static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
48
49 /*
50  * AMD specific MSR info for Speculative Store Bypass control.
51  * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
52  */
53 u64 __ro_after_init x86_amd_ls_cfg_base;
54 u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
55
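/*
 * Boot-time bug checking and mitigation selection: identify the boot CPU,
 * seed x86_spec_ctrl_base from MSR_IA32_SPEC_CTRL when available, and pick
 * the Spectre v2, Speculative Store Bypass and L1TF mitigations before the
 * alternatives are patched in.
 */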
56 void __init check_bugs(void)
57 {
58         identify_boot_cpu();
59
60         /*
61          * identify_boot_cpu() initialized the SMT support information; let
62          * the core code know.
63          */
64         cpu_smt_check_topology_early();
65
66         if (!IS_ENABLED(CONFIG_SMP)) {
67                 pr_info("CPU: ");
68                 print_cpu_info(&boot_cpu_data);
69         }
70
71         /*
72          * Read the SPEC_CTRL MSR to account for reserved bits which may
73          * have unknown values. The AMD64_LS_CFG MSR is cached in the early AMD
74          * init code as it is not enumerated and depends on the family.
75          */
76         if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
77                 rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
78
79         /* Allow STIBP in MSR_SPEC_CTRL if supported */
80         if (boot_cpu_has(X86_FEATURE_STIBP))
81                 x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
82
83         /* Select the proper spectre mitigation before patching alternatives */
84         spectre_v2_select_mitigation();
85
86         /*
87          * Select proper mitigation for any exposure to the Speculative Store
88          * Bypass vulnerability.
89          */
90         ssb_select_mitigation();
91
92         l1tf_select_mitigation();
93
94 #ifdef CONFIG_X86_32
95         /*
96          * Check whether we are able to run this kernel safely on SMP.
97          *
98          * - i386 is no longer supported.
99          * - In order to run on anything without a TSC, we need to be
100          *   compiled for a i486.
101          */
102         if (boot_cpu_data.x86 < 4)
103                 panic("Kernel requires i486+ for 'invlpg' and other features");
104
105         init_utsname()->machine[1] =
106                 '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
107         alternative_instructions();
108
109         fpu__init_check_bugs();
110 #else /* CONFIG_X86_64 */
111         alternative_instructions();
112
113         /*
114          * Make sure the first 2MB area is not mapped by huge pages.
115          * There are typically fixed-size MTRRs in there, and overlapping
116          * MTRRs into large pages causes slowdowns.
117          *
118          * Right now we don't do that with gbpages because there seems to be
119          * very little benefit for that case.
120          */
121         if (!direct_gbpages)
122                 set_memory_4k((unsigned long)__va(0), 1);
123 #endif
124 }
125
126 /* The kernel command line selection */
127 enum spectre_v2_mitigation_cmd {
128         SPECTRE_V2_CMD_NONE,
129         SPECTRE_V2_CMD_AUTO,
130         SPECTRE_V2_CMD_FORCE,
131         SPECTRE_V2_CMD_RETPOLINE,
132         SPECTRE_V2_CMD_RETPOLINE_GENERIC,
133         SPECTRE_V2_CMD_RETPOLINE_AMD,
134 };
135
136 static const char *spectre_v2_strings[] = {
137         [SPECTRE_V2_NONE]                       = "Vulnerable",
138         [SPECTRE_V2_RETPOLINE_MINIMAL]          = "Vulnerable: Minimal generic ASM retpoline",
139         [SPECTRE_V2_RETPOLINE_MINIMAL_AMD]      = "Vulnerable: Minimal AMD ASM retpoline",
140         [SPECTRE_V2_RETPOLINE_GENERIC]          = "Mitigation: Full generic retpoline",
141         [SPECTRE_V2_RETPOLINE_AMD]              = "Mitigation: Full AMD retpoline",
142         [SPECTRE_V2_IBRS_ENHANCED]              = "Mitigation: Enhanced IBRS",
143 };
144
145 #undef pr_fmt
146 #define pr_fmt(fmt)     "Spectre V2 : " fmt
147
148 static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
149         SPECTRE_V2_NONE;
150
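/*
 * x86_virt_spec_ctrl - update speculation control MSRs on guest entry/exit
 * @guest_spec_ctrl:      the guest's MSR_SPEC_CTRL value
 * @guest_virt_spec_ctrl: the guest's virtualized SSBD control value
 * @setguest:             true to switch to the guest values, false to
 *                        restore the host values
 *
 * Used around VMENTER/VMEXIT (e.g. by KVM) to keep SPEC_CTRL and the AMD
 * SSBD controls consistent between host and guest.
 */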
151 void
152 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
153 {
154         u64 msrval, guestval, hostval = x86_spec_ctrl_base;
155         struct thread_info *ti = current_thread_info();
156
157         /* Is MSR_SPEC_CTRL implemented ? */
158         if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
159                 /*
160                  * Restrict guest_spec_ctrl to supported values: clear the
161                  * modifiable bits in the host base value and OR in the
162                  * modifiable bits from the guest value.
163                  */
164                 guestval = hostval & ~x86_spec_ctrl_mask;
165                 guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
166
167                 /* SSBD controlled in MSR_SPEC_CTRL */
168                 if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
169                     static_cpu_has(X86_FEATURE_AMD_SSBD))
170                         hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
171
172                 if (hostval != guestval) {
173                         msrval = setguest ? guestval : hostval;
174                         wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
175                 }
176         }
177
178         /*
179          * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
180          * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
181          */
182         if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
183             !static_cpu_has(X86_FEATURE_VIRT_SSBD))
184                 return;
185
186         /*
187          * If the host has SSBD mitigation enabled, force it in the host's
188          * virtual MSR value. If it's not permanently enabled, evaluate
189          * current's TIF_SSBD thread flag.
190          */
191         if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
192                 hostval = SPEC_CTRL_SSBD;
193         else
194                 hostval = ssbd_tif_to_spec_ctrl(ti->flags);
195
196         /* Sanitize the guest value */
197         guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
198
199         if (hostval != guestval) {
200                 unsigned long tif;
201
202                 tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
203                                  ssbd_spec_ctrl_to_tif(hostval);
204
205                 speculative_store_bypass_update(tif);
206         }
207 }
208 EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
209
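/*
 * Engage Speculative Store Bypass Disable on AMD: prefer the virtualized
 * MSR_AMD64_VIRT_SPEC_CTRL interface when present, otherwise set the
 * family-dependent SSBD bit in MSR_AMD64_LS_CFG.
 */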
210 static void x86_amd_ssb_disable(void)
211 {
212         u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
213
214         if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
215                 wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
216         else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
217                 wrmsrl(MSR_AMD64_LS_CFG, msrval);
218 }
219
220 #ifdef RETPOLINE
221 static bool spectre_v2_bad_module;
222
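/*
 * Called from the module loader: if a Spectre v2 mitigation is active and
 * the module being loaded was not built with a retpoline-aware compiler,
 * warn and remember it so the sysfs report can flag the system.
 */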
223 bool retpoline_module_ok(bool has_retpoline)
224 {
225         if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
226                 return true;
227
228         pr_err("System may be vulnerable to spectre v2\n");
229         spectre_v2_bad_module = true;
230         return false;
231 }
232
233 static inline const char *spectre_v2_module_string(void)
234 {
235         return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
236 }
237 #else
238 static inline const char *spectre_v2_module_string(void) { return ""; }
239 #endif
240
241 static void __init spec2_print_if_insecure(const char *reason)
242 {
243         if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
244                 pr_info("%s selected on command line.\n", reason);
245 }
246
247 static void __init spec2_print_if_secure(const char *reason)
248 {
249         if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
250                 pr_info("%s selected on command line.\n", reason);
251 }
252
253 static inline bool retp_compiler(void)
254 {
255         return __is_defined(RETPOLINE);
256 }
257
258 static inline bool match_option(const char *arg, int arglen, const char *opt)
259 {
260         int len = strlen(opt);
261
262         return len == arglen && !strncmp(arg, opt, len);
263 }
264
265 static const struct {
266         const char *option;
267         enum spectre_v2_mitigation_cmd cmd;
268         bool secure;
269 } mitigation_options[] = {
270         { "off",               SPECTRE_V2_CMD_NONE,              false },
271         { "on",                SPECTRE_V2_CMD_FORCE,             true },
272         { "retpoline",         SPECTRE_V2_CMD_RETPOLINE,         false },
273         { "retpoline,amd",     SPECTRE_V2_CMD_RETPOLINE_AMD,     false },
274         { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
275         { "auto",              SPECTRE_V2_CMD_AUTO,              false },
276 };
277
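/*
 * Parse the spectre_v2= kernel command line argument. For example, booting
 * with "spectre_v2=retpoline,generic" yields SPECTRE_V2_CMD_RETPOLINE_GENERIC,
 * while "nospectre_v2" (or "spectre_v2=off") disables the mitigation.
 */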
278 static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
279 {
280         char arg[20];
281         int ret, i;
282         enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
283
284         if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
285                 return SPECTRE_V2_CMD_NONE;
286         else {
287                 ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
288                 if (ret < 0)
289                         return SPECTRE_V2_CMD_AUTO;
290
291                 for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
292                         if (!match_option(arg, ret, mitigation_options[i].option))
293                                 continue;
294                         cmd = mitigation_options[i].cmd;
295                         break;
296                 }
297
298                 if (i >= ARRAY_SIZE(mitigation_options)) {
299                         pr_err("unknown option (%s). Switching to AUTO select\n", arg);
300                         return SPECTRE_V2_CMD_AUTO;
301                 }
302         }
303
304         if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
305              cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
306              cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
307             !IS_ENABLED(CONFIG_RETPOLINE)) {
308                 pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
309                 return SPECTRE_V2_CMD_AUTO;
310         }
311
312         if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
313             boot_cpu_data.x86_vendor != X86_VENDOR_HYGON &&
314             boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
315                 pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
316                 return SPECTRE_V2_CMD_AUTO;
317         }
318
319         if (mitigation_options[i].secure)
320                 spec2_print_if_secure(mitigation_options[i].option);
321         else
322                 spec2_print_if_insecure(mitigation_options[i].option);
323
324         return cmd;
325 }
326
327 static bool stibp_needed(void)
328 {
329         if (spectre_v2_enabled == SPECTRE_V2_NONE)
330                 return false;
331
332         if (!boot_cpu_has(X86_FEATURE_STIBP))
333                 return false;
334
335         return true;
336 }
337
338 static void update_stibp_msr(void *info)
339 {
340         wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
341 }
342
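/*
 * Called when the SMT state changes (and once from
 * spectre_v2_select_mitigation()): toggle STIBP in x86_spec_ctrl_base
 * according to whether SMT is enabled and write the new value on all CPUs.
 */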
343 void arch_smt_update(void)
344 {
345         u64 mask;
346
347         if (!stibp_needed())
348                 return;
349
350         mutex_lock(&spec_ctrl_mutex);
351         mask = x86_spec_ctrl_base;
352         if (cpu_smt_control == CPU_SMT_ENABLED)
353                 mask |= SPEC_CTRL_STIBP;
354         else
355                 mask &= ~SPEC_CTRL_STIBP;
356
357         if (mask != x86_spec_ctrl_base) {
358                 pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n",
359                                 cpu_smt_control == CPU_SMT_ENABLED ?
360                                 "Enabling" : "Disabling");
361                 x86_spec_ctrl_base = mask;
362                 on_each_cpu(update_stibp_msr, NULL, 1);
363         }
364         mutex_unlock(&spec_ctrl_mutex);
365 }
366
367 static void __init spectre_v2_select_mitigation(void)
368 {
369         enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
370         enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;
371
372         /*
373          * If the CPU is not affected and the command line mode is NONE or AUTO
374          * then nothing to do.
375          */
376         if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
377             (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
378                 return;
379
380         switch (cmd) {
381         case SPECTRE_V2_CMD_NONE:
382                 return;
383
384         case SPECTRE_V2_CMD_FORCE:
385         case SPECTRE_V2_CMD_AUTO:
386                 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
387                         mode = SPECTRE_V2_IBRS_ENHANCED;
388                         /* Force it so VMEXIT will restore correctly */
389                         x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
390                         wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
391                         goto specv2_set_mode;
392                 }
393                 if (IS_ENABLED(CONFIG_RETPOLINE))
394                         goto retpoline_auto;
395                 break;
396         case SPECTRE_V2_CMD_RETPOLINE_AMD:
397                 if (IS_ENABLED(CONFIG_RETPOLINE))
398                         goto retpoline_amd;
399                 break;
400         case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
401                 if (IS_ENABLED(CONFIG_RETPOLINE))
402                         goto retpoline_generic;
403                 break;
404         case SPECTRE_V2_CMD_RETPOLINE:
405                 if (IS_ENABLED(CONFIG_RETPOLINE))
406                         goto retpoline_auto;
407                 break;
408         }
409         pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
410         return;
411
412 retpoline_auto:
413         if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
414             boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
415         retpoline_amd:
416                 if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
417                         pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
418                         goto retpoline_generic;
419                 }
420                 mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
421                                          SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
422                 setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
423                 setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
424         } else {
425         retpoline_generic:
426                 mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
427                                          SPECTRE_V2_RETPOLINE_MINIMAL;
428                 setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
429         }
430
431 specv2_set_mode:
432         spectre_v2_enabled = mode;
433         pr_info("%s\n", spectre_v2_strings[mode]);
434
435         /*
436          * If spectre v2 protection has been enabled, unconditionally fill
437          * RSB during a context switch; this protects against two independent
438          * issues:
439          *
440          *      - RSB underflow (and switch to BTB) on Skylake+
441          *      - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
442          */
443         setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
444         pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
445
446         /* Initialize Indirect Branch Prediction Barrier if supported */
447         if (boot_cpu_has(X86_FEATURE_IBPB)) {
448                 setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
449                 pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
450         }
451
452         /*
453          * Retpoline means the kernel is safe because it has no indirect
454          * branches. Enhanced IBRS protects firmware too, so, enable restricted
455          * branches. Enhanced IBRS protects firmware too, so enable restricted
456          * supported.
457          *
458          * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
459          * the user might select retpoline on the kernel command line and if
460          * the CPU supports Enhanced IBRS, the kernel might unintentionally fail
461          * to enable IBRS around firmware calls.
462          */
463         if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
464                 setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
465                 pr_info("Enabling Restricted Speculation for firmware calls\n");
466         }
467
468         /* Enable STIBP if appropriate */
469         arch_smt_update();
470 }
471
472 #undef pr_fmt
473 #define pr_fmt(fmt)     "Speculative Store Bypass: " fmt
474
475 static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;
476
477 /* The kernel command line selection */
478 enum ssb_mitigation_cmd {
479         SPEC_STORE_BYPASS_CMD_NONE,
480         SPEC_STORE_BYPASS_CMD_AUTO,
481         SPEC_STORE_BYPASS_CMD_ON,
482         SPEC_STORE_BYPASS_CMD_PRCTL,
483         SPEC_STORE_BYPASS_CMD_SECCOMP,
484 };
485
486 static const char *ssb_strings[] = {
487         [SPEC_STORE_BYPASS_NONE]        = "Vulnerable",
488         [SPEC_STORE_BYPASS_DISABLE]     = "Mitigation: Speculative Store Bypass disabled",
489         [SPEC_STORE_BYPASS_PRCTL]       = "Mitigation: Speculative Store Bypass disabled via prctl",
490         [SPEC_STORE_BYPASS_SECCOMP]     = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
491 };
492
493 static const struct {
494         const char *option;
495         enum ssb_mitigation_cmd cmd;
496 } ssb_mitigation_options[] = {
497         { "auto",       SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
498         { "on",         SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
499         { "off",        SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
500         { "prctl",      SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
501         { "seccomp",    SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
502 };
503
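/*
 * Parse the spec_store_bypass_disable= kernel command line argument.
 * For example, "spec_store_bypass_disable=seccomp" selects
 * SPEC_STORE_BYPASS_CMD_SECCOMP, while "nospec_store_bypass_disable"
 * turns the mitigation off entirely.
 */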
504 static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
505 {
506         enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
507         char arg[20];
508         int ret, i;
509
510         if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
511                 return SPEC_STORE_BYPASS_CMD_NONE;
512         } else {
513                 ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
514                                           arg, sizeof(arg));
515                 if (ret < 0)
516                         return SPEC_STORE_BYPASS_CMD_AUTO;
517
518                 for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
519                         if (!match_option(arg, ret, ssb_mitigation_options[i].option))
520                                 continue;
521
522                         cmd = ssb_mitigation_options[i].cmd;
523                         break;
524                 }
525
526                 if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
527                         pr_err("unknown option (%s). Switching to AUTO select\n", arg);
528                         return SPEC_STORE_BYPASS_CMD_AUTO;
529                 }
530         }
531
532         return cmd;
533 }
534
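/*
 * Pick the Speculative Store Bypass mitigation mode from the command line
 * and the CPU features; for the "on" case, engage SSBD immediately either
 * through MSR_IA32_SPEC_CTRL or through the AMD-specific mechanism.
 */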
535 static enum ssb_mitigation __init __ssb_select_mitigation(void)
536 {
537         enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
538         enum ssb_mitigation_cmd cmd;
539
540         if (!boot_cpu_has(X86_FEATURE_SSBD))
541                 return mode;
542
543         cmd = ssb_parse_cmdline();
544         if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
545             (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
546              cmd == SPEC_STORE_BYPASS_CMD_AUTO))
547                 return mode;
548
549         switch (cmd) {
550         case SPEC_STORE_BYPASS_CMD_AUTO:
551         case SPEC_STORE_BYPASS_CMD_SECCOMP:
552                 /*
553                  * Choose prctl+seccomp as the default mode if seccomp is
554                  * enabled.
555                  */
556                 if (IS_ENABLED(CONFIG_SECCOMP))
557                         mode = SPEC_STORE_BYPASS_SECCOMP;
558                 else
559                         mode = SPEC_STORE_BYPASS_PRCTL;
560                 break;
561         case SPEC_STORE_BYPASS_CMD_ON:
562                 mode = SPEC_STORE_BYPASS_DISABLE;
563                 break;
564         case SPEC_STORE_BYPASS_CMD_PRCTL:
565                 mode = SPEC_STORE_BYPASS_PRCTL;
566                 break;
567         case SPEC_STORE_BYPASS_CMD_NONE:
568                 break;
569         }
570
571         /*
572          * We have three CPU feature flags that are in play here:
573          *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
574          *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
575          *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
576          */
577         if (mode == SPEC_STORE_BYPASS_DISABLE) {
578                 setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
579                 /*
580                  * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
581                  * use a completely different MSR and bit dependent on family.
582                  */
583                 if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
584                     !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
585                         x86_amd_ssb_disable();
586                 } else {
587                         x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
588                         x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
589                         wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
590                 }
591         }
592
593         return mode;
594 }
595
596 static void ssb_select_mitigation(void)
597 {
598         ssb_mode = __ssb_select_mitigation();
599
600         if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
601                 pr_info("%s\n", ssb_strings[ssb_mode]);
602 }
603
604 #undef pr_fmt
605 #define pr_fmt(fmt)     "Speculation prctl: " fmt
606
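/*
 * Per-task control of Speculative Store Bypass. From userspace this is
 * reached through the speculation control prctl(), e.g.:
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *	      PR_SPEC_DISABLE, 0, 0);
 *
 * which ends up in ssb_prctl_set(current, PR_SPEC_DISABLE) below.
 */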
607 static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
608 {
609         bool update;
610
611         if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
612             ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
613                 return -ENXIO;
614
615         switch (ctrl) {
616         case PR_SPEC_ENABLE:
617                 /* If speculation is force disabled, enable is not allowed */
618                 if (task_spec_ssb_force_disable(task))
619                         return -EPERM;
620                 task_clear_spec_ssb_disable(task);
621                 update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
622                 break;
623         case PR_SPEC_DISABLE:
624                 task_set_spec_ssb_disable(task);
625                 update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
626                 break;
627         case PR_SPEC_FORCE_DISABLE:
628                 task_set_spec_ssb_disable(task);
629                 task_set_spec_ssb_force_disable(task);
630                 update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
631                 break;
632         default:
633                 return -ERANGE;
634         }
635
636         /*
637          * If being set on non-current task, delay setting the CPU
638          * mitigation until it is next scheduled.
639          */
640         if (task == current && update)
641                 speculative_store_bypass_update_current();
642
643         return 0;
644 }
645
646 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
647                              unsigned long ctrl)
648 {
649         switch (which) {
650         case PR_SPEC_STORE_BYPASS:
651                 return ssb_prctl_set(task, ctrl);
652         default:
653                 return -ENODEV;
654         }
655 }
656
657 #ifdef CONFIG_SECCOMP
658 void arch_seccomp_spec_mitigate(struct task_struct *task)
659 {
660         if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
661                 ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
662 }
663 #endif
664
665 static int ssb_prctl_get(struct task_struct *task)
666 {
667         switch (ssb_mode) {
668         case SPEC_STORE_BYPASS_DISABLE:
669                 return PR_SPEC_DISABLE;
670         case SPEC_STORE_BYPASS_SECCOMP:
671         case SPEC_STORE_BYPASS_PRCTL:
672                 if (task_spec_ssb_force_disable(task))
673                         return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
674                 if (task_spec_ssb_disable(task))
675                         return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
676                 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
677         default:
678                 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
679                         return PR_SPEC_ENABLE;
680                 return PR_SPEC_NOT_AFFECTED;
681         }
682 }
683
684 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
685 {
686         switch (which) {
687         case PR_SPEC_STORE_BYPASS:
688                 return ssb_prctl_get(task);
689         default:
690                 return -ENODEV;
691         }
692 }
693
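/*
 * Called during secondary (AP) CPU bring-up: mirror the boot CPU's
 * SPEC_CTRL base value and, if selected, the AMD SSBD setting on the
 * freshly onlined CPU.
 */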
694 void x86_spec_ctrl_setup_ap(void)
695 {
696         if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
697                 wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
698
699         if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
700                 x86_amd_ssb_disable();
701 }
702
703 #undef pr_fmt
704 #define pr_fmt(fmt)     "L1TF: " fmt
705
706 /* Default mitigation for L1TF-affected CPUs */
707 enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
708 #if IS_ENABLED(CONFIG_KVM_INTEL)
709 EXPORT_SYMBOL_GPL(l1tf_mitigation);
710 #endif
711 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
712 EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
713
714 /*
715  * These CPUs all support a 44-bit physical address space internally in the
716  * cache, but CPUID can report a smaller number of physical address bits.
717  *
718  * The L1TF mitigation uses the topmost address bit for the inversion of
719  * non-present PTEs. When the installed memory reaches into the topmost
720  * address bit due to memory holes, which has been observed on machines
721  * that report 36 physical address bits and have 32G of RAM installed,
722  * the mitigation range check in l1tf_select_mitigation() triggers.
723  * This is a false positive because the mitigation is still possible, since
724  * the cache internally uses 44 bits. Use the cache bits instead of the
725  * reported physical bits and adjust them on the affected machines to 44
726  * if the reported bits are less than 44.
727  */
728 static void override_cache_bits(struct cpuinfo_x86 *c)
729 {
730         if (c->x86 != 6)
731                 return;
732
733         switch (c->x86_model) {
734         case INTEL_FAM6_NEHALEM:
735         case INTEL_FAM6_WESTMERE:
736         case INTEL_FAM6_SANDYBRIDGE:
737         case INTEL_FAM6_IVYBRIDGE:
738         case INTEL_FAM6_HASWELL_CORE:
739         case INTEL_FAM6_HASWELL_ULT:
740         case INTEL_FAM6_HASWELL_GT3E:
741         case INTEL_FAM6_BROADWELL_CORE:
742         case INTEL_FAM6_BROADWELL_GT3E:
743         case INTEL_FAM6_SKYLAKE_MOBILE:
744         case INTEL_FAM6_SKYLAKE_DESKTOP:
745         case INTEL_FAM6_KABYLAKE_MOBILE:
746         case INTEL_FAM6_KABYLAKE_DESKTOP:
747                 if (c->x86_cache_bits < 44)
748                         c->x86_cache_bits = 44;
749                 break;
750         }
751 }
752
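/*
 * Select the L1TF mitigation: honour the l1tf= setting for SMT control,
 * warn if the kernel is not PAE-capable, and check whether installed RAM
 * reaches beyond the range that PTE inversion can protect.
 */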
753 static void __init l1tf_select_mitigation(void)
754 {
755         u64 half_pa;
756
757         if (!boot_cpu_has_bug(X86_BUG_L1TF))
758                 return;
759
760         override_cache_bits(&boot_cpu_data);
761
762         switch (l1tf_mitigation) {
763         case L1TF_MITIGATION_OFF:
764         case L1TF_MITIGATION_FLUSH_NOWARN:
765         case L1TF_MITIGATION_FLUSH:
766                 break;
767         case L1TF_MITIGATION_FLUSH_NOSMT:
768         case L1TF_MITIGATION_FULL:
769                 cpu_smt_disable(false);
770                 break;
771         case L1TF_MITIGATION_FULL_FORCE:
772                 cpu_smt_disable(true);
773                 break;
774         }
775
776 #if CONFIG_PGTABLE_LEVELS == 2
777         pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
778         return;
779 #endif
780
781         half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
782         if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
783                 pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
784                 pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
785                                 half_pa);
786                 pr_info("However, doing so will make a part of your RAM unusable.\n");
787                 pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/l1tf.html might help you decide.\n");
788                 return;
789         }
790
791         setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
792 }
793
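/*
 * Parse the l1tf= early parameter, e.g. "l1tf=full,force" selects
 * L1TF_MITIGATION_FULL_FORCE, which also forcibly disables SMT in
 * l1tf_select_mitigation().
 */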
794 static int __init l1tf_cmdline(char *str)
795 {
796         if (!boot_cpu_has_bug(X86_BUG_L1TF))
797                 return 0;
798
799         if (!str)
800                 return -EINVAL;
801
802         if (!strcmp(str, "off"))
803                 l1tf_mitigation = L1TF_MITIGATION_OFF;
804         else if (!strcmp(str, "flush,nowarn"))
805                 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
806         else if (!strcmp(str, "flush"))
807                 l1tf_mitigation = L1TF_MITIGATION_FLUSH;
808         else if (!strcmp(str, "flush,nosmt"))
809                 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
810         else if (!strcmp(str, "full"))
811                 l1tf_mitigation = L1TF_MITIGATION_FULL;
812         else if (!strcmp(str, "full,force"))
813                 l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;
814
815         return 0;
816 }
817 early_param("l1tf", l1tf_cmdline);
818
819 #undef pr_fmt
820
821 #ifdef CONFIG_SYSFS
822
823 #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
824
825 #if IS_ENABLED(CONFIG_KVM_INTEL)
826 static const char *l1tf_vmx_states[] = {
827         [VMENTER_L1D_FLUSH_AUTO]                = "auto",
828         [VMENTER_L1D_FLUSH_NEVER]               = "vulnerable",
829         [VMENTER_L1D_FLUSH_COND]                = "conditional cache flushes",
830         [VMENTER_L1D_FLUSH_ALWAYS]              = "cache flushes",
831         [VMENTER_L1D_FLUSH_EPT_DISABLED]        = "EPT disabled",
832         [VMENTER_L1D_FLUSH_NOT_REQUIRED]        = "flush not necessary"
833 };
834
835 static ssize_t l1tf_show_state(char *buf)
836 {
837         if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
838                 return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
839
840         if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
841             (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
842              cpu_smt_control == CPU_SMT_ENABLED))
843                 return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
844                                l1tf_vmx_states[l1tf_vmx_mitigation]);
845
846         return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
847                        l1tf_vmx_states[l1tf_vmx_mitigation],
848                        cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled");
849 }
850 #else
851 static ssize_t l1tf_show_state(char *buf)
852 {
853         return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
854 }
855 #endif
856
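/*
 * sysfs reporting: the cpu_show_*() functions below back the files in
 * /sys/devices/system/cpu/vulnerabilities/ (meltdown, spectre_v1,
 * spectre_v2, spec_store_bypass, l1tf).
 */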
857 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
858                                char *buf, unsigned int bug)
859 {
860         int ret;
861
862         if (!boot_cpu_has_bug(bug))
863                 return sprintf(buf, "Not affected\n");
864
865         switch (bug) {
866         case X86_BUG_CPU_MELTDOWN:
867                 if (boot_cpu_has(X86_FEATURE_PTI))
868                         return sprintf(buf, "Mitigation: PTI\n");
869
870                 if (hypervisor_is_type(X86_HYPER_XEN_PV))
871                         return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");
872
873                 break;
874
875         case X86_BUG_SPECTRE_V1:
876                 return sprintf(buf, "Mitigation: __user pointer sanitization\n");
877
878         case X86_BUG_SPECTRE_V2:
879                 ret = sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
880                                boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
881                                boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
882                                (x86_spec_ctrl_base & SPEC_CTRL_STIBP) ? ", STIBP" : "",
883                                boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
884                                spectre_v2_module_string());
885                 return ret;
886
887         case X86_BUG_SPEC_STORE_BYPASS:
888                 return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
889
890         case X86_BUG_L1TF:
891                 if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
892                         return l1tf_show_state(buf);
893                 break;
894         default:
895                 break;
896         }
897
898         return sprintf(buf, "Vulnerable\n");
899 }
900
901 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
902 {
903         return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
904 }
905
906 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
907 {
908         return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
909 }
910
911 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
912 {
913         return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
914 }
915
916 ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
917 {
918         return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
919 }
920
921 ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
922 {
923         return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
924 }
925 #endif