x86/cpufeatures: Disentangle SSBD enumeration
[linux-2.6-block.git] / arch/x86/kernel/cpu/bugs.c
// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  Cyrix stuff, June 1998 by:
 *      - Rafael R. Reilova (moved everything from head.S),
 *        <rreilova@ececs.uc.edu>
 *      - Channing Corn (tests & fixes),
 *      - Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>

static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);

/*
 * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
 * writes to SPEC_CTRL contain whatever reserved bits have been set.
 */
u64 __ro_after_init x86_spec_ctrl_base;

/*
 * The SPEC_CTRL MSR bits which may *not* be passed to x86_spec_ctrl_set().
 * Only IBRS is allowed by default; vendor specific code clears further bits
 * (e.g. SSBD) from this mask once the corresponding mitigation is selected.
 */
static u64 __ro_after_init x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS;

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

void __init check_bugs(void)
{
        identify_boot_cpu();

        if (!IS_ENABLED(CONFIG_SMP)) {
                pr_info("CPU: ");
                print_cpu_info(&boot_cpu_data);
        }

        /*
         * Read the SPEC_CTRL MSR to account for reserved bits which may
         * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
         * init code as it is not enumerated and depends on the family.
         */
        if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

        /* Select the proper spectre mitigation before patching alternatives */
        spectre_v2_select_mitigation();

        /*
         * Select proper mitigation for any exposure to the Speculative Store
         * Bypass vulnerability.
         */
        ssb_select_mitigation();

#ifdef CONFIG_X86_32
        /*
         * Check whether we are able to run this kernel safely on SMP.
         *
         * - i386 is no longer supported.
         * - In order to run on anything without a TSC, we need to be
         *   compiled for an i486.
         */
        if (boot_cpu_data.x86 < 4)
                panic("Kernel requires i486+ for 'invlpg' and other features");

        init_utsname()->machine[1] =
                '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
        alternative_instructions();

        fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
        alternative_instructions();

        /*
         * Make sure the first 2MB area is not mapped by huge pages.
         * There are typically fixed size MTRRs in there and overlapping
         * MTRRs into large pages causes slowdowns.
         *
         * Right now we don't do that with gbpages because there seems to be
         * very little benefit for that case.
         */
        if (!direct_gbpages)
                set_memory_4k((unsigned long)__va(0), 1);
#endif
}

/* The kernel command line selection */
enum spectre_v2_mitigation_cmd {
        SPECTRE_V2_CMD_NONE,
        SPECTRE_V2_CMD_AUTO,
        SPECTRE_V2_CMD_FORCE,
        SPECTRE_V2_CMD_RETPOLINE,
        SPECTRE_V2_CMD_RETPOLINE_GENERIC,
        SPECTRE_V2_CMD_RETPOLINE_AMD,
};

static const char *spectre_v2_strings[] = {
        [SPECTRE_V2_NONE]                       = "Vulnerable",
        [SPECTRE_V2_RETPOLINE_MINIMAL]          = "Vulnerable: Minimal generic ASM retpoline",
        [SPECTRE_V2_RETPOLINE_MINIMAL_AMD]      = "Vulnerable: Minimal AMD ASM retpoline",
        [SPECTRE_V2_RETPOLINE_GENERIC]          = "Mitigation: Full generic retpoline",
        [SPECTRE_V2_RETPOLINE_AMD]              = "Mitigation: Full AMD retpoline",
};

#undef pr_fmt
#define pr_fmt(fmt)     "Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
        SPECTRE_V2_NONE;

void x86_spec_ctrl_set(u64 val)
{
        if (val & x86_spec_ctrl_mask)
                WARN_ONCE(1, "SPEC_CTRL MSR value 0x%16llx is unknown.\n", val);
        else
                wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base | val);
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);

u64 x86_spec_ctrl_get_default(void)
{
        u64 msrval = x86_spec_ctrl_base;

        if (static_cpu_has(X86_FEATURE_SPEC_CTRL))
                msrval |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);
        return msrval;
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);

void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
{
        u64 host = x86_spec_ctrl_base;

        /* Is MSR_SPEC_CTRL implemented? */
        if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                return;

        /* SSBD controlled in MSR_SPEC_CTRL */
        if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
                host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);

        if (host != guest_spec_ctrl)
                wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);

void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
{
        u64 host = x86_spec_ctrl_base;

        /* Is MSR_SPEC_CTRL implemented? */
        if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                return;

        /* SSBD controlled in MSR_SPEC_CTRL */
        if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
                host |= ssbd_tif_to_spec_ctrl(current_thread_info()->flags);

        if (host != guest_spec_ctrl)
                wrmsrl(MSR_IA32_SPEC_CTRL, host);
}
EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
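
/*
 * Illustrative sketch (not kernel code as-is): a hypervisor is expected to
 * bracket guest entry/exit with the two exported helpers above. The
 * vcpu_spec_ctrl variable below is a placeholder for the guest's view of
 * MSR_IA32_SPEC_CTRL.
 */
#if 0
        x86_spec_ctrl_set_guest(vcpu_spec_ctrl);       /* before entering the guest */
        /* ... guest runs ... */
        x86_spec_ctrl_restore_host(vcpu_spec_ctrl);    /* after VM exit */
#endif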

static void x86_amd_ssb_disable(void)
{
        u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

        if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
                wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

#ifdef RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
        if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
                return true;

        pr_err("System may be vulnerable to spectre v2\n");
        spectre_v2_bad_module = true;
        return false;
}

static inline const char *spectre_v2_module_string(void)
{
        return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

static void __init spec2_print_if_insecure(const char *reason)
{
        if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                pr_info("%s selected on command line.\n", reason);
}

static void __init spec2_print_if_secure(const char *reason)
{
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
                pr_info("%s selected on command line.\n", reason);
}

static inline bool retp_compiler(void)
{
        return __is_defined(RETPOLINE);
}

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
        int len = strlen(opt);

        return len == arglen && !strncmp(arg, opt, len);
}

static const struct {
        const char *option;
        enum spectre_v2_mitigation_cmd cmd;
        bool secure;
} mitigation_options[] = {
        { "off",               SPECTRE_V2_CMD_NONE,              false },
        { "on",                SPECTRE_V2_CMD_FORCE,             true },
        { "retpoline",         SPECTRE_V2_CMD_RETPOLINE,         false },
        { "retpoline,amd",     SPECTRE_V2_CMD_RETPOLINE_AMD,     false },
        { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
        { "auto",              SPECTRE_V2_CMD_AUTO,              false },
};
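
/*
 * Example (illustrative) kernel command lines using the table above:
 * "spectre_v2=retpoline,generic" forces the generic retpoline, while
 * "nospectre_v2" or "spectre_v2=off" disables the mitigation entirely.
 */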

static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
        char arg[20];
        int ret, i;
        enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;

        if (cmdline_find_option_bool(boot_command_line, "nospectre_v2"))
                return SPECTRE_V2_CMD_NONE;
        else {
                ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
                if (ret < 0)
                        return SPECTRE_V2_CMD_AUTO;

                for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
                        if (!match_option(arg, ret, mitigation_options[i].option))
                                continue;
                        cmd = mitigation_options[i].cmd;
                        break;
                }

                if (i >= ARRAY_SIZE(mitigation_options)) {
                        pr_err("unknown option (%s). Switching to AUTO select\n", arg);
                        return SPECTRE_V2_CMD_AUTO;
                }
        }

        if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
             cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
             cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
            !IS_ENABLED(CONFIG_RETPOLINE)) {
                pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
                return SPECTRE_V2_CMD_AUTO;
        }

        if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
            boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
                pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
                return SPECTRE_V2_CMD_AUTO;
        }

        if (mitigation_options[i].secure)
                spec2_print_if_secure(mitigation_options[i].option);
        else
                spec2_print_if_insecure(mitigation_options[i].option);

        return cmd;
}

/* Check for Skylake-like CPUs (for RSB handling) */
static bool __init is_skylake_era(void)
{
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
            boot_cpu_data.x86 == 6) {
                switch (boot_cpu_data.x86_model) {
                case INTEL_FAM6_SKYLAKE_MOBILE:
                case INTEL_FAM6_SKYLAKE_DESKTOP:
                case INTEL_FAM6_SKYLAKE_X:
                case INTEL_FAM6_KABYLAKE_MOBILE:
                case INTEL_FAM6_KABYLAKE_DESKTOP:
                        return true;
                }
        }
        return false;
}

static void __init spectre_v2_select_mitigation(void)
{
        enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
        enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

        /*
         * If the CPU is not affected and the command line mode is NONE or AUTO
         * then nothing to do.
         */
        if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
            (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
                return;

        switch (cmd) {
        case SPECTRE_V2_CMD_NONE:
                return;

        case SPECTRE_V2_CMD_FORCE:
        case SPECTRE_V2_CMD_AUTO:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_auto;
                break;
        case SPECTRE_V2_CMD_RETPOLINE_AMD:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_amd;
                break;
        case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_generic;
                break;
        case SPECTRE_V2_CMD_RETPOLINE:
                if (IS_ENABLED(CONFIG_RETPOLINE))
                        goto retpoline_auto;
                break;
        }
        pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
        return;

retpoline_auto:
        if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
        retpoline_amd:
                if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
                        pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
                        goto retpoline_generic;
                }
                mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
                                         SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        } else {
        retpoline_generic:
                mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
                                         SPECTRE_V2_RETPOLINE_MINIMAL;
                setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
        }

        spectre_v2_enabled = mode;
        pr_info("%s\n", spectre_v2_strings[mode]);

        /*
         * If neither SMEP nor PTI are available, there is a risk of
         * hitting userspace addresses in the RSB after a context switch
         * from a shallow call stack to a deeper one. To prevent this, fill
         * the entire RSB, even when using IBRS.
         *
         * Skylake era CPUs have a separate issue with *underflow* of the
         * RSB, where they predict 'ret' targets from the generic BTB.
         * The proper mitigation for this is IBRS. If IBRS is not supported
         * or deactivated in favour of retpolines, the RSB fill on context
         * switch is required.
         */
        if ((!boot_cpu_has(X86_FEATURE_PTI) &&
             !boot_cpu_has(X86_FEATURE_SMEP)) || is_skylake_era()) {
                setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
                pr_info("Spectre v2 mitigation: Filling RSB on context switch\n");
        }

        /* Initialize Indirect Branch Prediction Barrier if supported */
        if (boot_cpu_has(X86_FEATURE_IBPB)) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
                pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n");
        }

        /*
         * Retpoline means the kernel is safe because it has no indirect
         * branches. But firmware isn't, so use IBRS to protect that.
         */
        if (boot_cpu_has(X86_FEATURE_IBRS)) {
                setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
                pr_info("Enabling Restricted Speculation for firmware calls\n");
        }
}

#undef pr_fmt
#define pr_fmt(fmt)     "Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
        SPEC_STORE_BYPASS_CMD_NONE,
        SPEC_STORE_BYPASS_CMD_AUTO,
        SPEC_STORE_BYPASS_CMD_ON,
        SPEC_STORE_BYPASS_CMD_PRCTL,
        SPEC_STORE_BYPASS_CMD_SECCOMP,
};

static const char *ssb_strings[] = {
        [SPEC_STORE_BYPASS_NONE]        = "Vulnerable",
        [SPEC_STORE_BYPASS_DISABLE]     = "Mitigation: Speculative Store Bypass disabled",
        [SPEC_STORE_BYPASS_PRCTL]       = "Mitigation: Speculative Store Bypass disabled via prctl",
        [SPEC_STORE_BYPASS_SECCOMP]     = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};

static const struct {
        const char *option;
        enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[] = {
        { "auto",       SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
        { "on",         SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
        { "off",        SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
        { "prctl",      SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
        { "seccomp",    SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};
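
/*
 * Example (illustrative) boot parameters using the table above:
 * "spec_store_bypass_disable=seccomp" selects the prctl+seccomp mode, and
 * "nospec_store_bypass_disable" leaves Speculative Store Bypass untouched.
 */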

static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
        enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
        char arg[20];
        int ret, i;

        if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable")) {
                return SPEC_STORE_BYPASS_CMD_NONE;
        } else {
                ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
                                          arg, sizeof(arg));
                if (ret < 0)
                        return SPEC_STORE_BYPASS_CMD_AUTO;

                for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
                        if (!match_option(arg, ret, ssb_mitigation_options[i].option))
                                continue;

                        cmd = ssb_mitigation_options[i].cmd;
                        break;
                }

                if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
                        pr_err("unknown option (%s). Switching to AUTO select\n", arg);
                        return SPEC_STORE_BYPASS_CMD_AUTO;
                }
        }

        return cmd;
}

static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
        enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
        enum ssb_mitigation_cmd cmd;

        if (!boot_cpu_has(X86_FEATURE_SSBD))
                return mode;

        cmd = ssb_parse_cmdline();
        if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
            (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
             cmd == SPEC_STORE_BYPASS_CMD_AUTO))
                return mode;

        switch (cmd) {
        case SPEC_STORE_BYPASS_CMD_AUTO:
        case SPEC_STORE_BYPASS_CMD_SECCOMP:
                /*
                 * Choose prctl+seccomp as the default mode if seccomp is
                 * enabled.
                 */
                if (IS_ENABLED(CONFIG_SECCOMP))
                        mode = SPEC_STORE_BYPASS_SECCOMP;
                else
                        mode = SPEC_STORE_BYPASS_PRCTL;
                break;
        case SPEC_STORE_BYPASS_CMD_ON:
                mode = SPEC_STORE_BYPASS_DISABLE;
                break;
        case SPEC_STORE_BYPASS_CMD_PRCTL:
                mode = SPEC_STORE_BYPASS_PRCTL;
                break;
        case SPEC_STORE_BYPASS_CMD_NONE:
                break;
        }

        /*
         * We have three CPU feature flags that are in play here:
         *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
         *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
         *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
         */
        if (mode == SPEC_STORE_BYPASS_DISABLE) {
                setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
                /*
                 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD uses
                 * a completely different MSR and bit dependent on family.
                 */
                switch (boot_cpu_data.x86_vendor) {
                case X86_VENDOR_INTEL:
                        x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
                        x86_spec_ctrl_mask &= ~SPEC_CTRL_SSBD;
                        x86_spec_ctrl_set(SPEC_CTRL_SSBD);
                        break;
                case X86_VENDOR_AMD:
                        x86_amd_ssb_disable();
                        break;
                }
        }

        return mode;
}

static void ssb_select_mitigation(void)
{
        ssb_mode = __ssb_select_mitigation();

        if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                pr_info("%s\n", ssb_strings[ssb_mode]);
}

#undef pr_fmt
#define pr_fmt(fmt)     "Speculation prctl: " fmt

static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
        bool update;

        if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
            ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
                return -ENXIO;

        switch (ctrl) {
        case PR_SPEC_ENABLE:
                /* If speculation is force disabled, enable is not allowed */
                if (task_spec_ssb_force_disable(task))
                        return -EPERM;
                task_clear_spec_ssb_disable(task);
                update = test_and_clear_tsk_thread_flag(task, TIF_SSBD);
                break;
        case PR_SPEC_DISABLE:
                task_set_spec_ssb_disable(task);
                update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
                break;
        case PR_SPEC_FORCE_DISABLE:
                task_set_spec_ssb_disable(task);
                task_set_spec_ssb_force_disable(task);
                update = !test_and_set_tsk_thread_flag(task, TIF_SSBD);
                break;
        default:
                return -ERANGE;
        }

        /*
         * If being set on non-current task, delay setting the CPU
         * mitigation until it is next scheduled.
         */
        if (task == current && update)
                speculative_store_bypass_update();

        return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
                             unsigned long ctrl)
{
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
                return ssb_prctl_set(task, ctrl);
        default:
                return -ENODEV;
        }
}

#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
        if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
                ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif

static int ssb_prctl_get(struct task_struct *task)
{
        switch (ssb_mode) {
        case SPEC_STORE_BYPASS_DISABLE:
                return PR_SPEC_DISABLE;
        case SPEC_STORE_BYPASS_SECCOMP:
        case SPEC_STORE_BYPASS_PRCTL:
                if (task_spec_ssb_force_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
                if (task_spec_ssb_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
                return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
        default:
                if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
                        return PR_SPEC_ENABLE;
                return PR_SPEC_NOT_AFFECTED;
        }
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
        switch (which) {
        case PR_SPEC_STORE_BYPASS:
                return ssb_prctl_get(task);
        default:
                return -ENODEV;
        }
}
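
/*
 * Illustrative sketch (userspace, not part of this file): how a task would
 * exercise the interface above via prctl(2), assuming <sys/prctl.h> and
 * <linux/prctl.h> provide the PR_* definitions.
 */
#if 0
        /* Opt this task out of Speculative Store Bypass. */
        if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
                  PR_SPEC_DISABLE, 0, 0))
                perror("PR_SET_SPECULATION_CTRL");

        /* Query the state; returns e.g. PR_SPEC_PRCTL | PR_SPEC_DISABLE. */
        long state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
#endif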

void x86_spec_ctrl_setup_ap(void)
{
        if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
                x86_spec_ctrl_set(x86_spec_ctrl_base & ~x86_spec_ctrl_mask);

        if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
                x86_amd_ssb_disable();
}

#ifdef CONFIG_SYSFS

static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
                               char *buf, unsigned int bug)
{
        if (!boot_cpu_has_bug(bug))
                return sprintf(buf, "Not affected\n");

        switch (bug) {
        case X86_BUG_CPU_MELTDOWN:
                if (boot_cpu_has(X86_FEATURE_PTI))
                        return sprintf(buf, "Mitigation: PTI\n");

                break;

        case X86_BUG_SPECTRE_V1:
                return sprintf(buf, "Mitigation: __user pointer sanitization\n");

        case X86_BUG_SPECTRE_V2:
                return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
                               boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
                               boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
                               spectre_v2_module_string());

        case X86_BUG_SPEC_STORE_BYPASS:
                return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

        default:
                break;
        }

        return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
        return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}
#endif
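
/*
 * Example (illustrative) output of the sysfs hooks above, as read from
 * /sys/devices/system/cpu/vulnerabilities/ on a mitigated system:
 *
 *   spectre_v2:        Mitigation: Full generic retpoline, IBPB, IBRS_FW
 *   spec_store_bypass: Mitigation: Speculative Store Bypass disabled via prctl and seccomp
 */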