x86/speculation: Support Enhanced IBRS on future CPUs
arch/x86/kernel/cpu/bugs.c
// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  Cyrix stuff, June 1998 by:
 *      - Rafael R. Reilova (moved everything from head.S),
 *        <rreilova@ececs.uc.edu>
 *      - Channing Corn (tests & fixes),
 *      - Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>
#include <asm/hypervisor.h>

static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);

/*
 * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
 * writes to SPEC_CTRL contain whatever reserved bits have been set.
 */
u64 __ro_after_init x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
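
/*
 * Illustrative note: later SPEC_CTRL writes are expected to be composed from
 * this base plus any per-task bits, e.g. the SSBD context-switch update
 * conceptually does (sketch, not the literal implementation):
 *
 *      wrmsrl(MSR_IA32_SPEC_CTRL,
 *             x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(task_thread_flags));
 *
 * so whatever reserved bits were found at boot stay set.
 */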

/*
 * The vendor and possibly platform specific bits which can be modified in
 * x86_spec_ctrl_base.
 */
static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

void __init check_bugs(void)
{
        identify_boot_cpu();

        if (!IS_ENABLED(CONFIG_SMP)) {
                pr_info("CPU: ");
                print_cpu_info(&boot_cpu_data);
        }

        /*
         * Read the SPEC_CTRL MSR to account for reserved bits which may
         * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
         * init code as it is not enumerated and depends on the family.
         */
        if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

        /* Allow STIBP in MSR_SPEC_CTRL if supported */
        if (boot_cpu_has(X86_FEATURE_STIBP))
                x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;

        /* Select the proper spectre mitigation before patching alternatives */
        spectre_v2_select_mitigation();

        /*
         * Select proper mitigation for any exposure to the Speculative Store
         * Bypass vulnerability.
         */
        ssb_select_mitigation();

#ifdef CONFIG_X86_32
        /*
         * Check whether we are able to run this kernel safely on SMP.
         *
         * - i386 is no longer supported.
         * - In order to run on anything without a TSC, we need to be
         *   compiled for a i486.
         */
        if (boot_cpu_data.x86 < 4)
                panic("Kernel requires i486+ for 'invlpg' and other features");

        init_utsname()->machine[1] =
                '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
        alternative_instructions();

        fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
        alternative_instructions();

        /*
         * Make sure the first 2MB area is not mapped by huge pages.
         * There are typically fixed-size MTRRs in there and overlapping
         * MTRRs into large pages causes slowdowns.
         *
         * Right now we don't do that with gbpages because there seems
         * very little benefit for that case.
         */
        if (!direct_gbpages)
                set_memory_4k((unsigned long)__va(0), 1);
#endif
}

/* The kernel command line selection */
enum spectre_v2_mitigation_cmd {
        SPECTRE_V2_CMD_NONE,
        SPECTRE_V2_CMD_AUTO,
        SPECTRE_V2_CMD_FORCE,
        SPECTRE_V2_CMD_RETPOLINE,
        SPECTRE_V2_CMD_RETPOLINE_GENERIC,
        SPECTRE_V2_CMD_RETPOLINE_AMD,
};

static const char *spectre_v2_strings[] = {
        [SPECTRE_V2_NONE]                       = "Vulnerable",
        [SPECTRE_V2_RETPOLINE_MINIMAL]          = "Vulnerable: Minimal generic ASM retpoline",
        [SPECTRE_V2_RETPOLINE_MINIMAL_AMD]      = "Vulnerable: Minimal AMD ASM retpoline",
        [SPECTRE_V2_RETPOLINE_GENERIC]          = "Mitigation: Full generic retpoline",
        [SPECTRE_V2_RETPOLINE_AMD]              = "Mitigation: Full AMD retpoline",
        [SPECTRE_V2_IBRS_ENHANCED]              = "Mitigation: Enhanced IBRS",
};

#undef pr_fmt
#define pr_fmt(fmt)     "Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
        SPECTRE_V2_NONE;

void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
        u64 msrval, guestval, hostval = x86_spec_ctrl_base;
        struct thread_info *ti = current_thread_info();

        /* Is MSR_SPEC_CTRL implemented ? */
        if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
                /*
                 * Restrict guest_spec_ctrl to supported values. Clear the
                 * modifiable bits in the host base value and OR in the
                 * modifiable bits from the guest value.
                 */
                guestval = hostval & ~x86_spec_ctrl_mask;
                guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;

                /* SSBD controlled in MSR_SPEC_CTRL */
                if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
                    static_cpu_has(X86_FEATURE_AMD_SSBD))
                        hostval |= ssbd_tif_to_spec_ctrl(ti->flags);

                if (hostval != guestval) {
                        msrval = setguest ? guestval : hostval;
                        wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
                }
        }

        /*
         * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
         * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
         */
        if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
            !static_cpu_has(X86_FEATURE_VIRT_SSBD))
                return;

        /*
         * If the host has SSBD mitigation enabled, force it in the host's
         * virtual MSR value. If it's not permanently enabled, evaluate
         * current's TIF_SSBD thread flag.
         */
        if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
                hostval = SPEC_CTRL_SSBD;
        else
                hostval = ssbd_tif_to_spec_ctrl(ti->flags);

        /* Sanitize the guest value */
        guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

        if (hostval != guestval) {
                unsigned long tif;

                tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
                                 ssbd_spec_ctrl_to_tif(hostval);

                speculative_store_bypass_update(tif);
        }
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
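
/*
 * Typical caller usage (illustrative sketch, modelled on the KVM/SVM path):
 * hypervisor code is expected to go through the x86_spec_ctrl_set_guest() and
 * x86_spec_ctrl_restore_host() wrappers from <asm/spec-ctrl.h> around guest
 * entry/exit, e.g.:
 *
 *      x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);
 *      ... run the guest ...
 *      x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);
 */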

static void x86_amd_ssb_disable(void)
{
        u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

        if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
                wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
        else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
                wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

#ifdef RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
        if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
                return true;

        pr_err("System may be vulnerable to spectre v2\n");
        spectre_v2_bad_module = true;
        return false;
}

static inline const char *spectre_v2_module_string(void)
{
        return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

static void __init spec2_print_if_insecure(const char *reason)
{
        if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                pr_info("%s selected on command line.\n", reason);
}

static void __init spec2_print_if_secure(const char *reason)
{
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                pr_info("%s selected on command line.\n", reason);
}

static inline bool retp_compiler(void)
{
        return __is_defined(RETPOLINE);
}

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
        int len = strlen(opt);

        return len == arglen && !strncmp(arg, opt, len);
}

static const struct {
        const char *option;
        enum spectre_v2_mitigation_cmd cmd;
        bool secure;
} mitigation_options[] = {
        { "off",               SPECTRE_V2_CMD_NONE,              false },
        { "on",                SPECTRE_V2_CMD_FORCE,             true },
        { "retpoline",         SPECTRE_V2_CMD_RETPOLINE,         false },
        { "retpoline,amd",     SPECTRE_V2_CMD_RETPOLINE_AMD,     false },
        { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
        { "auto",              SPECTRE_V2_CMD_AUTO,              false },
};
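
/*
 * Example (illustrative): the table above maps "spectre_v2=" kernel command
 * line values, e.g. booting with "spectre_v2=retpoline,generic" yields
 * SPECTRE_V2_CMD_RETPOLINE_GENERIC, while "nospectre_v2" or "spectre_v2=off"
 * selects SPECTRE_V2_CMD_NONE.
 */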

static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
        char arg[20];
        int ret, i;
        enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;

        if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
                return SPECTRE_V2_CMD_NONE;
        else {
                ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
                if (ret < 0)
                        return SPECTRE_V2_CMD_AUTO;

                for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
                        if (!match_option(arg, ret, mitigation_options[i].option))
                                continue;
                        cmd = mitigation_options[i].cmd;
                        break;
                }

                if (i >= ARRAY_SIZE(mitigation_options)) {
                        pr_err("unknown option (%s). Switching to AUTO select\n", arg);
                        return SPECTRE_V2_CMD_AUTO;
                }
        }

        if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
             cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
             cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
            !IS_ENABLED(CONFIG_RETPOLINE)) {
                pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
                return SPECTRE_V2_CMD_AUTO;
        }

        if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
            boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
                pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
                return SPECTRE_V2_CMD_AUTO;
        }

        if (mitigation_options[i].secure)
                spec2_print_if_secure(mitigation_options[i].option);
        else
                spec2_print_if_insecure(mitigation_options[i].option);

        return cmd;
}

static void __init spectre_v2_select_mitigation(void)
{
        enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
        enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

        /*
         * If the CPU is not affected and the command line mode is NONE or AUTO
         * then nothing to do.
         */
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
            (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
                return;

        switch (cmd) {
        case SPECTRE_V2_CMD_NONE:
                return;

        case SPECTRE_V2_CMD_FORCE:
        case SPECTRE_V2_CMD_AUTO:
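                /*
                 * Note: on CPUs that enumerate Enhanced IBRS, a single
                 * SPEC_CTRL.IBRS=1 write at boot keeps indirect branch
                 * restrictions permanently in effect (kernel and firmware)
                 * without per-entry MSR writes, so it is preferred over
                 * retpoline in the auto/forced case below.
                 */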
                if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
                        mode = SPECTRE_V2_IBRS_ENHANCED;
                        /* Force it so VMEXIT will restore correctly */
                        x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
                        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
                        goto specv2_set_mode;
                }
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_auto;
                break;
        case SPECTRE_V2_CMD_RETPOLINE_AMD:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_amd;
                break;
        case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_generic;
                break;
        case SPECTRE_V2_CMD_RETPOLINE:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_auto;
                break;
        }
        pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
        return;

retpoline_auto:
        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
        retpoline_amd:
                if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
                        pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
                        goto retpoline_generic;
                }
                mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
                                         SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        } else {
        retpoline_generic:
                mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
                                         SPECTRE_V2_RETPOLINE_MINIMAL;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        }

specv2_set_mode:
        spectre_v2_enabled = mode;
        pr_info("%s\n", spectre_v2_strings[mode]);

        /*
         * If spectre v2 protection has been enabled, unconditionally fill
         * RSB during a context switch; this protects against two independent
         * issues:
         *
         *      - RSB underflow (and switch to BTB) on Skylake+
         *      - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
         */
        setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
        pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
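
        /*
         * Illustrative note: X86_FEATURE_RSB_CTXSW is the feature bit that
         * patches the FILL_RETURN_BUFFER sequence into the context-switch
         * assembly, stuffing the RSB with benign entries on every switch.
         */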

        /* Initialize Indirect Branch Prediction Barrier if supported */
        if (boot_cpu_has(X86_FEATURE_IBPB)) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
                pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
        }
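
        /*
         * Illustrative note: X86_FEATURE_USE_IBPB gates
         * indirect_branch_prediction_barrier(), which issues a barrier write
         * to MSR_IA32_PRED_CMD on selected switches between user processes.
         */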

        /*
         * Retpoline means the kernel is safe because it has no indirect
         * branches. Enhanced IBRS protects firmware too, so enable restricted
         * speculation around firmware calls only when Enhanced IBRS isn't
         * supported.
         *
         * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
         * the user might select retpoline on the kernel command line and, if
         * the CPU supports Enhanced IBRS, the kernel might unintentionally not
         * enable IBRS around firmware calls.
         */
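        /*
         * Illustrative note: X86_FEATURE_USE_IBRS_FW is consumed by the
         * firmware_restrict_branch_speculation_start()/_end() helpers in
         * <asm/nospec-branch.h>, which set and clear SPEC_CTRL.IBRS around
         * calls into EFI and other firmware.
         */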
        if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
                pr_info("Enabling Restricted Speculation for firmware calls\n");
        }
}

#undef pr_fmt
#define pr_fmt(fmt)     "Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
        SPEC_STORE_BYPASS_CMD_NONE,
        SPEC_STORE_BYPASS_CMD_AUTO,
        SPEC_STORE_BYPASS_CMD_ON,
        SPEC_STORE_BYPASS_CMD_PRCTL,
        SPEC_STORE_BYPASS_CMD_SECCOMP,
};

static const char *ssb_strings[] = {
        [SPEC_STORE_BYPASS_NONE]        = "Vulnerable",
        [SPEC_STORE_BYPASS_DISABLE]     = "Mitigation: Speculative Store Bypass disabled",
        [SPEC_STORE_BYPASS_PRCTL]       = "Mitigation: Speculative Store Bypass disabled via prctl",
        [SPEC_STORE_BYPASS_SECCOMP]     = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};

static const struct {
        const char *option;
        enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[] = {
        { "auto",       SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
        { "on",         SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
        { "off",        SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
        { "prctl",      SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
        { "seccomp",    SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};
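
/*
 * Example (illustrative): the table above maps "spec_store_bypass_disable="
 * kernel command line values, e.g. "spec_store_bypass_disable=seccomp"
 * selects SPEC_STORE_BYPASS_CMD_SECCOMP; "nospec_store_bypass_disable"
 * turns the mitigation off entirely.
 */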

static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
        enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
        char arg[20];
        int ret, i;

        if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
                return SPEC_STORE_BYPASS_CMD_NONE;
        } else {
                ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
                                          arg, sizeof(arg));
                if (ret < 0)
                        return SPEC_STORE_BYPASS_CMD_AUTO;

                for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
                        if (!match_option(arg, ret, ssb_mitigation_options[i].option))
                                continue;

                        cmd = ssb_mitigation_options[i].cmd;
                        break;
                }

                if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
                        pr_err("unknown option (%s). Switching to AUTO select\n", arg);
                        return SPEC_STORE_BYPASS_CMD_AUTO;
                }
        }

        return cmd;
}

static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
        enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
        enum ssb_mitigation_cmd cmd;

        if (!boot_cpu_has(X86_FEATURE_SSBD))
                return mode;

        cmd = ssb_parse_cmdline();
        if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
            (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
             cmd == SPEC_STORE_BYPASS_CMD_AUTO))
                return mode;

        switch (cmd) {
        case SPEC_STORE_BYPASS_CMD_AUTO:
        case SPEC_STORE_BYPASS_CMD_SECCOMP:
                /*
                 * Choose prctl+seccomp as the default mode if seccomp is
                 * enabled.
                 */
                if (IS_ENABLED(CONFIG_SECCOMP))
                        mode = SPEC_STORE_BYPASS_SECCOMP;
                else
                        mode = SPEC_STORE_BYPASS_PRCTL;
                break;
        case SPEC_STORE_BYPASS_CMD_ON:
                mode = SPEC_STORE_BYPASS_DISABLE;
                break;
        case SPEC_STORE_BYPASS_CMD_PRCTL:
                mode = SPEC_STORE_BYPASS_PRCTL;
                break;
        case SPEC_STORE_BYPASS_CMD_NONE:
                break;
        }

        /*
         * We have three CPU feature flags that are in play here:
         *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
         *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
         *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
         */
        if (mode == SPEC_STORE_BYPASS_DISABLE) {
                setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
                /*
                 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
                 * use a completely different MSR and bit dependent on family.
                 */
                if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
                    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
                        x86_amd_ssb_disable();
                } else {
                        x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
                        x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
                        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
                }
        }

        return mode;
}

static void ssb_select_mitigation(void)
{
        ssb_mode = __ssb_select_mitigation();

        if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                pr_info("%s\n", ssb_strings[ssb_mode]);
}

#undef pr_fmt
#define pr_fmt(fmt)     "Speculation prctl: " fmt

static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
        bool update;

        if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
            ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
                return -ENXIO;

        switch (ctrl) {
        case PR_SPEC_ENABLE:
                /* If speculation is force disabled, enable is not allowed */
                if (task_spec_ssb_force_disable(task))
                        return -EPERM;
                task_clear_spec_ssb_disable(task);
                update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
                break;
        case PR_SPEC_DISABLE:
                task_set_spec_ssb_disable(task);
                update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
                break;
        case PR_SPEC_FORCE_DISABLE:
                task_set_spec_ssb_disable(task);
                task_set_spec_ssb_force_disable(task);
                update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
                break;
        default:
                return -ERANGE;
        }

        /*
         * If being set on non-current task, delay setting the CPU
         * mitigation until it is next scheduled.
         */
        if (task == current && update)
                speculative_store_bypass_update_current();

        return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
                             unsigned long ctrl)
{
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
                return ssb_prctl_set(task, ctrl);
        default:
                return -ENODEV;
        }
}
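
/*
 * Userspace usage (illustrative): the per-task control above is reached via
 * prctl(2), e.g.
 *
 *      prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *            PR_SPEC_DISABLE, 0, 0);
 *
 * and queried with prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0).
 */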

#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
        if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
                ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif

static int ssb_prctl_get(struct task_struct *task)
{
        switch (ssb_mode) {
        case SPEC_STORE_BYPASS_DISABLE:
                return PR_SPEC_DISABLE;
        case SPEC_STORE_BYPASS_SECCOMP:
        case SPEC_STORE_BYPASS_PRCTL:
                if (task_spec_ssb_force_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
                if (task_spec_ssb_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
                return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
        default:
                if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                        return PR_SPEC_ENABLE;
                return PR_SPEC_NOT_AFFECTED;
        }
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
                return ssb_prctl_get(task);
        default:
                return -ENODEV;
        }
}

void x86_spec_ctrl_setup_ap(void)
{
        if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

        if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
                x86_amd_ssb_disable();
}
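
/*
 * Note (illustrative): x86_spec_ctrl_setup_ap() is called from the secondary
 * CPU identification path during AP bringup, so every CPU mirrors the boot
 * CPU's SPEC_CTRL base value and SSBD setting.
 */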

#ifdef CONFIG_SYSFS

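/*
 * Illustrative note: the cpu_show_*() callbacks below back the files under
 * /sys/devices/system/cpu/vulnerabilities/ (meltdown, spectre_v1, spectre_v2,
 * spec_store_bypass).
 */
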
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
                               char *buf, unsigned int bug)
{
        if (!boot_cpu_has_bug(bug))
                return sprintf(buf, "Not affected\n");

        switch (bug) {
        case X86_BUG_CPU_MELTDOWN:
                if (boot_cpu_has(X86_FEATURE_PTI))
                        return sprintf(buf, "Mitigation: PTI\n");

                if (hypervisor_is_type(X86_HYPER_XEN_PV))
                        return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");

                break;

        case X86_BUG_SPECTRE_V1:
                return sprintf(buf, "Mitigation: __user pointer sanitization\n");

        case X86_BUG_SPECTRE_V2:
                return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
                               boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
                               boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
                               spectre_v2_module_string());

        case X86_BUG_SPEC_STORE_BYPASS:
                return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

        default:
                break;
        }

        return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}
#endif