// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Cyrix stuff, June 1998 by:
 * - Rafael R. Reilova (moved everything from head.S),
 *   <rreilova@ececs.uc.edu>
 * - Channing Corn (tests & fixes),
 * - Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/smt.h>
#include <linux/pgtable.h>
#include <linux/bpf.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/api.h>
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/cpu_device_id.h>
#include <asm/e820/api.h>
#include <asm/hypervisor.h>
#include <asm/tlbflush.h>
#include <asm/cpu.h>

#include "cpu.h"

/*
 * Speculation Vulnerability Handling
 *
 * Each vulnerability is handled with the following functions:
 *   <vuln>_select_mitigation() -- Selects a mitigation to use. This should
 *                                 take into account all relevant command line
 *                                 options.
 *   <vuln>_update_mitigation() -- This is called after all vulnerabilities have
 *                                 selected a mitigation, in case the selection
 *                                 may want to change based on other choices
 *                                 made. This function is optional.
 *   <vuln>_apply_mitigation()  -- Enable the selected mitigation.
 *
 * The compile-time mitigation in all cases should be AUTO. An explicit
 * command-line option can override AUTO. If no such option is
 * provided, <vuln>_select_mitigation() will override AUTO to the best
 * mitigation option.
 */
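
/*
 * Illustrative example of the flow above, using MDS as implemented in
 * this file: CONFIG_MITIGATION_MDS=y makes the compile-time default
 * MDS_MITIGATION_AUTO; "mds=off" on the command line overrides it to
 * MDS_MITIGATION_OFF; otherwise mds_select_mitigation() resolves AUTO
 * to MDS_MITIGATION_FULL. mds_update_mitigation() then upgrades an OFF
 * selection to FULL when TAA/MMIO/RFDS selected VERW-based clearing,
 * and mds_apply_mitigation() finally sets X86_FEATURE_CLEAR_CPU_BUF.
 */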

static void __init spectre_v1_select_mitigation(void);
static void __init spectre_v1_apply_mitigation(void);
static void __init spectre_v2_select_mitigation(void);
static void __init spectre_v2_update_mitigation(void);
static void __init spectre_v2_apply_mitigation(void);
static void __init retbleed_select_mitigation(void);
static void __init retbleed_update_mitigation(void);
static void __init retbleed_apply_mitigation(void);
static void __init spectre_v2_user_select_mitigation(void);
static void __init spectre_v2_user_update_mitigation(void);
static void __init spectre_v2_user_apply_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init ssb_apply_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init l1tf_apply_mitigation(void);
static void __init mds_select_mitigation(void);
static void __init mds_update_mitigation(void);
static void __init mds_apply_mitigation(void);
static void __init taa_select_mitigation(void);
static void __init taa_update_mitigation(void);
static void __init taa_apply_mitigation(void);
static void __init mmio_select_mitigation(void);
static void __init mmio_update_mitigation(void);
static void __init mmio_apply_mitigation(void);
static void __init rfds_select_mitigation(void);
static void __init rfds_update_mitigation(void);
static void __init rfds_apply_mitigation(void);
static void __init srbds_select_mitigation(void);
static void __init srbds_apply_mitigation(void);
static void __init l1d_flush_select_mitigation(void);
static void __init srso_select_mitigation(void);
static void __init srso_update_mitigation(void);
static void __init srso_apply_mitigation(void);
static void __init gds_select_mitigation(void);
static void __init gds_apply_mitigation(void);
static void __init bhi_select_mitigation(void);
static void __init bhi_update_mitigation(void);
static void __init bhi_apply_mitigation(void);
static void __init its_select_mitigation(void);
static void __init its_update_mitigation(void);
static void __init its_apply_mitigation(void);
static void __init tsa_select_mitigation(void);
static void __init tsa_apply_mitigation(void);

/* The base value of the SPEC_CTRL MSR without task-specific bits set */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);

/* The current value of the SPEC_CTRL MSR with task-specific bits set */
DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);

u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;

static u64 __ro_after_init x86_arch_cap_msr;

static DEFINE_MUTEX(spec_ctrl_mutex);

void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;

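/*
 * All updates of the active return thunk funnel through here, so that a
 * later mitigation overriding an already-selected thunk is reported.
 */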
static void __init set_return_thunk(void *thunk)
{
	if (x86_return_thunk != __x86_return_thunk)
		pr_warn("x86/bugs: return thunk changed\n");

	x86_return_thunk = thunk;
}

/* Update SPEC_CTRL MSR and its cached copy unconditionally */
static void update_spec_ctrl(u64 val)
{
	this_cpu_write(x86_spec_ctrl_current, val);
	wrmsrq(MSR_IA32_SPEC_CTRL, val);
}

/*
 * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
 * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
 */
void update_spec_ctrl_cond(u64 val)
{
	if (this_cpu_read(x86_spec_ctrl_current) == val)
		return;

	this_cpu_write(x86_spec_ctrl_current, val);

	/*
	 * When KERNEL_IBRS is enabled, this MSR is written on return-to-user;
	 * unless forced, the update can be delayed until that time.
	 */
	if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
		wrmsrq(MSR_IA32_SPEC_CTRL, val);
}

noinstr u64 spec_ctrl_current(void)
{
	return this_cpu_read(x86_spec_ctrl_current);
}
EXPORT_SYMBOL_GPL(spec_ctrl_current);

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

/* Control conditional STIBP in switch_to() */
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
/* Control conditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

/* Control IBPB on vCPU load */
DEFINE_STATIC_KEY_FALSE(switch_vcpu_ibpb);
EXPORT_SYMBOL_GPL(switch_vcpu_ibpb);

/* Control CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);

/*
 * Controls whether L1D flush based mitigations are enabled, based on HW
 * features and the admin setting via boot parameter; defaults to false.
 */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

/*
 * Controls CPU Fill buffer clear before VMenter. This is a subset of
 * X86_FEATURE_CLEAR_CPU_BUF, and should only be enabled when KVM-only
 * mitigation is required.
 */
DEFINE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
EXPORT_SYMBOL_GPL(cpu_buf_vm_clear);

void __init cpu_select_mitigations(void)
{
	/*
	 * Read the SPEC_CTRL MSR to account for reserved bits which may
	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
	 * init code as it is not enumerated and depends on the family.
	 */
	if (cpu_feature_enabled(X86_FEATURE_MSR_SPEC_CTRL)) {
		rdmsrq(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

		/*
		 * A previously running kernel (kexec) may have some controls
		 * turned ON. Clear them and let the mitigations setup below
		 * rediscover them based on configuration.
		 */
		x86_spec_ctrl_base &= ~SPEC_CTRL_MITIGATIONS_MASK;
	}

	x86_arch_cap_msr = x86_read_arch_cap_msr();

	/* Select the proper CPU mitigations before patching alternatives: */
	spectre_v1_select_mitigation();
	spectre_v2_select_mitigation();
	retbleed_select_mitigation();
	spectre_v2_user_select_mitigation();
	ssb_select_mitigation();
	l1tf_select_mitigation();
	mds_select_mitigation();
	taa_select_mitigation();
	mmio_select_mitigation();
	rfds_select_mitigation();
	srbds_select_mitigation();
	l1d_flush_select_mitigation();
	srso_select_mitigation();
	gds_select_mitigation();
	its_select_mitigation();
	bhi_select_mitigation();
	tsa_select_mitigation();

	/*
	 * After mitigations are selected, some may need to update their
	 * choices.
	 */
	spectre_v2_update_mitigation();
	/*
	 * retbleed_update_mitigation() relies on the state set by
	 * spectre_v2_update_mitigation(); specifically it wants to know about
	 * spectre_v2=ibrs.
	 */
	retbleed_update_mitigation();
	/*
	 * its_update_mitigation() depends on spectre_v2_update_mitigation()
	 * and retbleed_update_mitigation().
	 */
	its_update_mitigation();

	/*
	 * spectre_v2_user_update_mitigation() depends on
	 * retbleed_update_mitigation(), specifically the STIBP
	 * selection is forced for UNRET or IBPB.
	 */
	spectre_v2_user_update_mitigation();
	mds_update_mitigation();
	taa_update_mitigation();
	mmio_update_mitigation();
	rfds_update_mitigation();
	bhi_update_mitigation();
	/* srso_update_mitigation() depends on retbleed_update_mitigation(). */
	srso_update_mitigation();

	spectre_v1_apply_mitigation();
	spectre_v2_apply_mitigation();
	retbleed_apply_mitigation();
	spectre_v2_user_apply_mitigation();
	ssb_apply_mitigation();
	l1tf_apply_mitigation();
	mds_apply_mitigation();
	taa_apply_mitigation();
	mmio_apply_mitigation();
	rfds_apply_mitigation();
	srbds_apply_mitigation();
	srso_apply_mitigation();
	gds_apply_mitigation();
	its_apply_mitigation();
	bhi_apply_mitigation();
	tsa_apply_mitigation();
}

/*
 * NOTE: This function is *only* called for SVM, since Intel uses
 * MSR_IA32_SPEC_CTRL for SSBD.
 */
void
x86_virt_spec_ctrl(u64 guest_virt_spec_ctrl, bool setguest)
{
	u64 guestval, hostval;
	struct thread_info *ti = current_thread_info();

	/*
	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
	 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
	 */
	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
		return;

	/*
	 * If the host has SSBD mitigation enabled, force it in the host's
	 * virtual MSR value. If it's not permanently enabled, evaluate
	 * current's TIF_SSBD thread flag.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
		hostval = SPEC_CTRL_SSBD;
	else
		hostval = ssbd_tif_to_spec_ctrl(ti->flags);

	/* Sanitize the guest value */
	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

	if (hostval != guestval) {
		unsigned long tif;

		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
				 ssbd_spec_ctrl_to_tif(hostval);

		speculation_ctrl_update(tif);
	}
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);

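/*
 * Enable Speculative Store Bypass Disable via whichever mechanism the CPU
 * enumerates: MSR_AMD64_VIRT_SPEC_CTRL when X86_FEATURE_VIRT_SSBD is set,
 * otherwise the family-specific SSBD bit in MSR_AMD64_LS_CFG.
 */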
static void x86_amd_ssb_disable(void)
{
	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
		wrmsrq(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
		wrmsrq(MSR_AMD64_LS_CFG, msrval);
}

#undef pr_fmt
#define pr_fmt(fmt)	"MDS: " fmt

/* Default mitigation for MDS-affected CPUs */
static enum mds_mitigations mds_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_MDS) ? MDS_MITIGATION_AUTO : MDS_MITIGATION_OFF;
static bool mds_nosmt __ro_after_init = false;

static const char * const mds_strings[] = {
	[MDS_MITIGATION_OFF]	= "Vulnerable",
	[MDS_MITIGATION_FULL]	= "Mitigation: Clear CPU buffers",
	[MDS_MITIGATION_VMWERV]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
};

enum taa_mitigations {
	TAA_MITIGATION_OFF,
	TAA_MITIGATION_AUTO,
	TAA_MITIGATION_UCODE_NEEDED,
	TAA_MITIGATION_VERW,
	TAA_MITIGATION_TSX_DISABLED,
};

/* Default mitigation for TAA-affected CPUs */
static enum taa_mitigations taa_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_TAA) ? TAA_MITIGATION_AUTO : TAA_MITIGATION_OFF;

enum mmio_mitigations {
	MMIO_MITIGATION_OFF,
	MMIO_MITIGATION_AUTO,
	MMIO_MITIGATION_UCODE_NEEDED,
	MMIO_MITIGATION_VERW,
};

/* Default mitigation for Processor MMIO Stale Data vulnerabilities */
static enum mmio_mitigations mmio_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_MMIO_STALE_DATA) ? MMIO_MITIGATION_AUTO : MMIO_MITIGATION_OFF;

enum rfds_mitigations {
	RFDS_MITIGATION_OFF,
	RFDS_MITIGATION_AUTO,
	RFDS_MITIGATION_VERW,
	RFDS_MITIGATION_UCODE_NEEDED,
};

/* Default mitigation for Register File Data Sampling */
static enum rfds_mitigations rfds_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_RFDS) ? RFDS_MITIGATION_AUTO : RFDS_MITIGATION_OFF;

/*
 * Set if any of MDS/TAA/MMIO/RFDS are going to enable VERW clearing
 * through X86_FEATURE_CLEAR_CPU_BUF on kernel and guest entry.
 */
static bool verw_clear_cpu_buf_mitigation_selected __ro_after_init;

static void __init mds_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
		mds_mitigation = MDS_MITIGATION_OFF;
		return;
	}

	if (mds_mitigation == MDS_MITIGATION_AUTO)
		mds_mitigation = MDS_MITIGATION_FULL;

	if (mds_mitigation == MDS_MITIGATION_OFF)
		return;

	verw_clear_cpu_buf_mitigation_selected = true;
}

static void __init mds_update_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off())
		return;

	/* If TAA, MMIO, or RFDS are being mitigated, MDS gets mitigated too. */
	if (verw_clear_cpu_buf_mitigation_selected)
		mds_mitigation = MDS_MITIGATION_FULL;

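	/*
	 * Without the MD_CLEAR microcode, VERW may not actually clear the
	 * CPU buffers, e.g. in a guest whose hypervisor does not expose
	 * the MD_CLEAR bit. The clearing is still attempted below, but the
	 * state is reported as VMWERV ("Clear CPU buffers attempted, no
	 * microcode").
	 */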
	if (mds_mitigation == MDS_MITIGATION_FULL) {
		if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
			mds_mitigation = MDS_MITIGATION_VMWERV;
	}

	pr_info("%s\n", mds_strings[mds_mitigation]);
}

static void __init mds_apply_mitigation(void)
{
	if (mds_mitigation == MDS_MITIGATION_FULL ||
	    mds_mitigation == MDS_MITIGATION_VMWERV) {
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
		if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
		    (mds_nosmt || cpu_mitigations_auto_nosmt()))
			cpu_smt_disable(false);
	}
}

static int __init mds_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		mds_mitigation = MDS_MITIGATION_OFF;
	else if (!strcmp(str, "full"))
		mds_mitigation = MDS_MITIGATION_FULL;
	else if (!strcmp(str, "full,nosmt")) {
		mds_mitigation = MDS_MITIGATION_FULL;
		mds_nosmt = true;
	}

	return 0;
}
early_param("mds", mds_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"TAA: " fmt

static bool taa_nosmt __ro_after_init;

static const char * const taa_strings[] = {
	[TAA_MITIGATION_OFF]		= "Vulnerable",
	[TAA_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
	[TAA_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
	[TAA_MITIGATION_TSX_DISABLED]	= "Mitigation: TSX disabled",
};

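/* TAA is only a concern while TSX transactions (RTM) can actually execute. */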
static bool __init taa_vulnerable(void)
{
	return boot_cpu_has_bug(X86_BUG_TAA) && boot_cpu_has(X86_FEATURE_RTM);
}

static void __init taa_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_TAA)) {
		taa_mitigation = TAA_MITIGATION_OFF;
		return;
	}

	/* TSX previously disabled by tsx=off */
	if (!boot_cpu_has(X86_FEATURE_RTM)) {
		taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
		return;
	}

	if (cpu_mitigations_off())
		taa_mitigation = TAA_MITIGATION_OFF;

	/* Microcode will be checked in taa_update_mitigation(). */
	if (taa_mitigation == TAA_MITIGATION_AUTO)
		taa_mitigation = TAA_MITIGATION_VERW;

	if (taa_mitigation != TAA_MITIGATION_OFF)
		verw_clear_cpu_buf_mitigation_selected = true;
}

static void __init taa_update_mitigation(void)
{
	if (!taa_vulnerable() || cpu_mitigations_off())
		return;

	if (verw_clear_cpu_buf_mitigation_selected)
		taa_mitigation = TAA_MITIGATION_VERW;

	if (taa_mitigation == TAA_MITIGATION_VERW) {
		/* Check if the requisite ucode is available. */
		if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
			taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;

		/*
		 * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
		 * A microcode update fixes this behavior to clear CPU buffers. It also
		 * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
		 * ARCH_CAP_TSX_CTRL_MSR bit.
		 *
		 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
		 * update is required.
		 */
		if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) &&
		    !(x86_arch_cap_msr & ARCH_CAP_TSX_CTRL_MSR))
			taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
	}

	pr_info("%s\n", taa_strings[taa_mitigation]);
}

static void __init taa_apply_mitigation(void)
{
	if (taa_mitigation == TAA_MITIGATION_VERW ||
	    taa_mitigation == TAA_MITIGATION_UCODE_NEEDED) {
		/*
		 * TSX is enabled, select alternate mitigation for TAA which is
		 * the same as MDS. Enable MDS static branch to clear CPU buffers.
		 *
		 * For guests that can't determine whether the correct microcode is
		 * present on host, enable the mitigation for UCODE_NEEDED as well.
		 */
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);

		if (taa_nosmt || cpu_mitigations_auto_nosmt())
			cpu_smt_disable(false);
	}
}

static int __init tsx_async_abort_parse_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_TAA))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off")) {
		taa_mitigation = TAA_MITIGATION_OFF;
	} else if (!strcmp(str, "full")) {
		taa_mitigation = TAA_MITIGATION_VERW;
	} else if (!strcmp(str, "full,nosmt")) {
		taa_mitigation = TAA_MITIGATION_VERW;
		taa_nosmt = true;
	}

	return 0;
}
early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"MMIO Stale Data: " fmt

static bool mmio_nosmt __ro_after_init = false;

static const char * const mmio_strings[] = {
	[MMIO_MITIGATION_OFF]		= "Vulnerable",
	[MMIO_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
	[MMIO_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
};

static void __init mmio_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
	    cpu_mitigations_off()) {
		mmio_mitigation = MMIO_MITIGATION_OFF;
		return;
	}

	/* Microcode will be checked in mmio_update_mitigation(). */
	if (mmio_mitigation == MMIO_MITIGATION_AUTO)
		mmio_mitigation = MMIO_MITIGATION_VERW;

	if (mmio_mitigation == MMIO_MITIGATION_OFF)
		return;

	/*
	 * Enable CPU buffer clear mitigation for host and VMM, if also affected
	 * by MDS or TAA.
	 */
	if (boot_cpu_has_bug(X86_BUG_MDS) || taa_vulnerable())
		verw_clear_cpu_buf_mitigation_selected = true;
}

static void __init mmio_update_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) || cpu_mitigations_off())
		return;

	if (verw_clear_cpu_buf_mitigation_selected)
		mmio_mitigation = MMIO_MITIGATION_VERW;

	if (mmio_mitigation == MMIO_MITIGATION_VERW) {
		/*
		 * Check if the system has the right microcode.
		 *
		 * CPU Fill buffer clear mitigation is enumerated by either an explicit
		 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
		 * affected systems.
		 */
		if (!((x86_arch_cap_msr & ARCH_CAP_FB_CLEAR) ||
		      (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
		       boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
		       !(x86_arch_cap_msr & ARCH_CAP_MDS_NO))))
			mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
	}

	pr_info("%s\n", mmio_strings[mmio_mitigation]);
}

static void __init mmio_apply_mitigation(void)
{
	if (mmio_mitigation == MMIO_MITIGATION_OFF)
		return;

	/*
	 * Only enable the VMM mitigation if the CPU buffer clear mitigation is
	 * not being used.
	 */
	if (verw_clear_cpu_buf_mitigation_selected) {
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
		static_branch_disable(&cpu_buf_vm_clear);
	} else {
		static_branch_enable(&cpu_buf_vm_clear);
	}

	/*
	 * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
	 * be propagated to uncore buffers, clearing the Fill buffers on idle
	 * is required irrespective of SMT state.
	 */
	if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
		static_branch_enable(&cpu_buf_idle_clear);

	if (mmio_nosmt || cpu_mitigations_auto_nosmt())
		cpu_smt_disable(false);
}

static int __init mmio_stale_data_parse_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off")) {
		mmio_mitigation = MMIO_MITIGATION_OFF;
	} else if (!strcmp(str, "full")) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
	} else if (!strcmp(str, "full,nosmt")) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
		mmio_nosmt = true;
	}

	return 0;
}
early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"Register File Data Sampling: " fmt

static const char * const rfds_strings[] = {
	[RFDS_MITIGATION_OFF]		= "Vulnerable",
	[RFDS_MITIGATION_VERW]		= "Mitigation: Clear Register File",
	[RFDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
};

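/* ARCH_CAP_RFDS_CLEAR enumerates microcode on which VERW also clears the register file. */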
static inline bool __init verw_clears_cpu_reg_file(void)
{
	return (x86_arch_cap_msr & ARCH_CAP_RFDS_CLEAR);
}

static void __init rfds_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_RFDS) || cpu_mitigations_off()) {
		rfds_mitigation = RFDS_MITIGATION_OFF;
		return;
	}

	if (rfds_mitigation == RFDS_MITIGATION_AUTO)
		rfds_mitigation = RFDS_MITIGATION_VERW;

	if (rfds_mitigation == RFDS_MITIGATION_OFF)
		return;

	if (verw_clears_cpu_reg_file())
		verw_clear_cpu_buf_mitigation_selected = true;
}

static void __init rfds_update_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_RFDS) || cpu_mitigations_off())
		return;

	if (verw_clear_cpu_buf_mitigation_selected)
		rfds_mitigation = RFDS_MITIGATION_VERW;

	if (rfds_mitigation == RFDS_MITIGATION_VERW) {
		if (!verw_clears_cpu_reg_file())
			rfds_mitigation = RFDS_MITIGATION_UCODE_NEEDED;
	}

	pr_info("%s\n", rfds_strings[rfds_mitigation]);
}

static void __init rfds_apply_mitigation(void)
{
	if (rfds_mitigation == RFDS_MITIGATION_VERW)
		setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
}

static __init int rfds_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!boot_cpu_has_bug(X86_BUG_RFDS))
		return 0;

	if (!strcmp(str, "off"))
		rfds_mitigation = RFDS_MITIGATION_OFF;
	else if (!strcmp(str, "on"))
		rfds_mitigation = RFDS_MITIGATION_VERW;

	return 0;
}
early_param("reg_file_data_sampling", rfds_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"SRBDS: " fmt

enum srbds_mitigations {
	SRBDS_MITIGATION_OFF,
	SRBDS_MITIGATION_AUTO,
	SRBDS_MITIGATION_UCODE_NEEDED,
	SRBDS_MITIGATION_FULL,
	SRBDS_MITIGATION_TSX_OFF,
	SRBDS_MITIGATION_HYPERVISOR,
};

static enum srbds_mitigations srbds_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_SRBDS) ? SRBDS_MITIGATION_AUTO : SRBDS_MITIGATION_OFF;

static const char * const srbds_strings[] = {
	[SRBDS_MITIGATION_OFF]		= "Vulnerable",
	[SRBDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
	[SRBDS_MITIGATION_FULL]		= "Mitigation: Microcode",
	[SRBDS_MITIGATION_TSX_OFF]	= "Mitigation: TSX disabled",
	[SRBDS_MITIGATION_HYPERVISOR]	= "Unknown: Dependent on hypervisor status",
};

static bool srbds_off;

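/*
 * Program MSR_IA32_MCU_OPT_CTRL for the selected SRBDS state: setting
 * RNGDS_MITG_DIS disables the microcode mitigation, clearing it keeps
 * the mitigation enabled.
 */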
void update_srbds_msr(void)
{
	u64 mcu_ctrl;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;

	if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
		return;

	/*
	 * An MDS_NO CPU for which the SRBDS mitigation is not needed due to
	 * TSX being disabled may not have received the SRBDS MSR microcode.
	 */
	if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
		return;

	rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);

	switch (srbds_mitigation) {
	case SRBDS_MITIGATION_OFF:
	case SRBDS_MITIGATION_TSX_OFF:
		mcu_ctrl |= RNGDS_MITG_DIS;
		break;
	case SRBDS_MITIGATION_FULL:
		mcu_ctrl &= ~RNGDS_MITG_DIS;
		break;
	default:
		break;
	}

	wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
}

static void __init srbds_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_SRBDS) || cpu_mitigations_off()) {
		srbds_mitigation = SRBDS_MITIGATION_OFF;
		return;
	}

	if (srbds_mitigation == SRBDS_MITIGATION_AUTO)
		srbds_mitigation = SRBDS_MITIGATION_FULL;

	/*
	 * Check to see if this is one of the MDS_NO systems supporting TSX that
	 * are only exposed to SRBDS when TSX is enabled or when CPU is affected
	 * by Processor MMIO Stale Data vulnerability.
	 */
	if ((x86_arch_cap_msr & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
	    !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
	else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
	else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
		srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
	else if (srbds_off)
		srbds_mitigation = SRBDS_MITIGATION_OFF;

	pr_info("%s\n", srbds_strings[srbds_mitigation]);
}

static void __init srbds_apply_mitigation(void)
{
	update_srbds_msr();
}

static int __init srbds_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return 0;

	srbds_off = !strcmp(str, "off");
	return 0;
}
early_param("srbds", srbds_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"L1D Flush : " fmt

enum l1d_flush_mitigations {
	L1D_FLUSH_OFF = 0,
	L1D_FLUSH_ON,
};

static enum l1d_flush_mitigations l1d_flush_mitigation __initdata = L1D_FLUSH_OFF;

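/* Opt-in only: requires "l1d_flush=on" on the command line plus X86_FEATURE_FLUSH_L1D. */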
static void __init l1d_flush_select_mitigation(void)
{
	if (!l1d_flush_mitigation || !boot_cpu_has(X86_FEATURE_FLUSH_L1D))
		return;

	static_branch_enable(&switch_mm_cond_l1d_flush);
	pr_info("Conditional flush on switch_mm() enabled\n");
}

static int __init l1d_flush_parse_cmdline(char *str)
{
	if (!strcmp(str, "on"))
		l1d_flush_mitigation = L1D_FLUSH_ON;

	return 0;
}
early_param("l1d_flush", l1d_flush_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"GDS: " fmt

enum gds_mitigations {
	GDS_MITIGATION_OFF,
	GDS_MITIGATION_AUTO,
	GDS_MITIGATION_UCODE_NEEDED,
	GDS_MITIGATION_FORCE,
	GDS_MITIGATION_FULL,
	GDS_MITIGATION_FULL_LOCKED,
	GDS_MITIGATION_HYPERVISOR,
};

static enum gds_mitigations gds_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_GDS) ? GDS_MITIGATION_AUTO : GDS_MITIGATION_OFF;

static const char * const gds_strings[] = {
	[GDS_MITIGATION_OFF]		= "Vulnerable",
	[GDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
	[GDS_MITIGATION_FORCE]		= "Mitigation: AVX disabled, no microcode",
	[GDS_MITIGATION_FULL]		= "Mitigation: Microcode",
	[GDS_MITIGATION_FULL_LOCKED]	= "Mitigation: Microcode (locked)",
	[GDS_MITIGATION_HYPERVISOR]	= "Unknown: Dependent on hypervisor status",
};

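/* True if the microcode-based GDS mitigation is in effect, locked or not. */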
bool gds_ucode_mitigated(void)
{
	return (gds_mitigation == GDS_MITIGATION_FULL ||
		gds_mitigation == GDS_MITIGATION_FULL_LOCKED);
}
EXPORT_SYMBOL_GPL(gds_ucode_mitigated);

void update_gds_msr(void)
{
	u64 mcu_ctrl_after;
	u64 mcu_ctrl;

	switch (gds_mitigation) {
	case GDS_MITIGATION_OFF:
		rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
		mcu_ctrl |= GDS_MITG_DIS;
		break;
	case GDS_MITIGATION_FULL_LOCKED:
		/*
		 * The LOCKED state comes from the boot CPU. APs might not have
		 * the same state. Make sure the mitigation is enabled on all
		 * CPUs.
		 */
	case GDS_MITIGATION_FULL:
		rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
		mcu_ctrl &= ~GDS_MITG_DIS;
		break;
	case GDS_MITIGATION_FORCE:
	case GDS_MITIGATION_UCODE_NEEDED:
	case GDS_MITIGATION_HYPERVISOR:
	case GDS_MITIGATION_AUTO:
		return;
	}

	wrmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);

	/*
	 * Check to make sure that the WRMSR value was not ignored. Writes to
	 * GDS_MITG_DIS will be ignored if this processor is locked but the boot
	 * processor was not.
	 */
	rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after);
	WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after);
}

static void __init gds_select_mitigation(void)
{
	u64 mcu_ctrl;

	if (!boot_cpu_has_bug(X86_BUG_GDS))
		return;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		gds_mitigation = GDS_MITIGATION_HYPERVISOR;
		return;
	}

	if (cpu_mitigations_off())
		gds_mitigation = GDS_MITIGATION_OFF;
	/* Will verify below that mitigation _can_ be disabled */

	if (gds_mitigation == GDS_MITIGATION_AUTO)
		gds_mitigation = GDS_MITIGATION_FULL;

	/* No microcode */
	if (!(x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)) {
		if (gds_mitigation != GDS_MITIGATION_FORCE)
			gds_mitigation = GDS_MITIGATION_UCODE_NEEDED;
		return;
	}

	/* Microcode has mitigation, use it */
	if (gds_mitigation == GDS_MITIGATION_FORCE)
		gds_mitigation = GDS_MITIGATION_FULL;

	rdmsrq(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
	if (mcu_ctrl & GDS_MITG_LOCKED) {
		if (gds_mitigation == GDS_MITIGATION_OFF)
			pr_warn("Mitigation locked. Disable failed.\n");

		/*
		 * The mitigation is selected from the boot CPU. All other CPUs
		 * _should_ have the same state. If the boot CPU isn't locked
		 * but others are then update_gds_msr() will WARN() of the state
		 * mismatch. If the boot CPU is locked update_gds_msr() will
		 * ensure the other CPUs have the mitigation enabled.
		 */
		gds_mitigation = GDS_MITIGATION_FULL_LOCKED;
	}
}

static void __init gds_apply_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_GDS))
		return;

	/* Microcode is present */
	if (x86_arch_cap_msr & ARCH_CAP_GDS_CTRL)
		update_gds_msr();
	else if (gds_mitigation == GDS_MITIGATION_FORCE) {
		/*
		 * This only needs to be done on the boot CPU so do it
		 * here rather than in update_gds_msr()
		 */
		setup_clear_cpu_cap(X86_FEATURE_AVX);
		pr_warn("Microcode update needed! Disabling AVX as mitigation.\n");
	}

	pr_info("%s\n", gds_strings[gds_mitigation]);
}

static int __init gds_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!boot_cpu_has_bug(X86_BUG_GDS))
		return 0;

	if (!strcmp(str, "off"))
		gds_mitigation = GDS_MITIGATION_OFF;
	else if (!strcmp(str, "force"))
		gds_mitigation = GDS_MITIGATION_FORCE;

	return 0;
}
early_param("gather_data_sampling", gds_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V1 : " fmt

enum spectre_v1_mitigation {
	SPECTRE_V1_MITIGATION_NONE,
	SPECTRE_V1_MITIGATION_AUTO,
};

static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V1) ?
		SPECTRE_V1_MITIGATION_AUTO : SPECTRE_V1_MITIGATION_NONE;

static const char * const spectre_v1_strings[] = {
	[SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
	[SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
};

/*
 * Does SMAP provide full mitigation against speculative kernel access to
 * userspace?
 */
static bool smap_works_speculatively(void)
{
	if (!boot_cpu_has(X86_FEATURE_SMAP))
		return false;

	/*
	 * On CPUs which are vulnerable to Meltdown, SMAP does not
	 * prevent speculative access to user data in the L1 cache.
	 * Consider SMAP to be non-functional as a mitigation on these
	 * CPUs.
	 */
	if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
		return false;

	return true;
}

static void __init spectre_v1_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off())
		spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
}

static void __init spectre_v1_apply_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off())
		return;

	if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
		/*
		 * With Spectre v1, a user can speculatively control either
		 * path of a conditional swapgs with a user-controlled GS
		 * value. The mitigation is to add lfences to both code paths.
		 *
		 * If FSGSBASE is enabled, the user can put a kernel address in
		 * GS, in which case SMAP provides no protection.
		 *
		 * If FSGSBASE is disabled, the user can only put a user space
		 * address in GS. That makes an attack harder, but still
		 * possible if there's no SMAP protection.
		 */
		if (boot_cpu_has(X86_FEATURE_FSGSBASE) ||
		    !smap_works_speculatively()) {
			/*
			 * Mitigation can be provided from SWAPGS itself or
			 * PTI as the CR3 write in the Meltdown mitigation
			 * is serializing.
			 *
			 * If neither is there, mitigate with an LFENCE to
			 * stop speculation through swapgs.
			 */
			if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
			    !boot_cpu_has(X86_FEATURE_PTI))
				setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);

			/*
			 * Enable lfences in the kernel entry (non-swapgs)
			 * paths, to prevent user entry from speculatively
			 * skipping swapgs.
			 */
			setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
		}
	}

	pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
}

static int __init nospectre_v1_cmdline(char *str)
{
	spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
	return 0;
}
early_param("nospectre_v1", nospectre_v1_cmdline);

enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = SPECTRE_V2_NONE;

#undef pr_fmt
#define pr_fmt(fmt)	"RETBleed: " fmt

enum its_mitigation {
	ITS_MITIGATION_OFF,
	ITS_MITIGATION_AUTO,
	ITS_MITIGATION_VMEXIT_ONLY,
	ITS_MITIGATION_ALIGNED_THUNKS,
	ITS_MITIGATION_RETPOLINE_STUFF,
};

static enum its_mitigation its_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_ITS) ? ITS_MITIGATION_AUTO : ITS_MITIGATION_OFF;

enum retbleed_mitigation {
	RETBLEED_MITIGATION_NONE,
	RETBLEED_MITIGATION_AUTO,
	RETBLEED_MITIGATION_UNRET,
	RETBLEED_MITIGATION_IBPB,
	RETBLEED_MITIGATION_IBRS,
	RETBLEED_MITIGATION_EIBRS,
	RETBLEED_MITIGATION_STUFF,
};

static const char * const retbleed_strings[] = {
	[RETBLEED_MITIGATION_NONE]	= "Vulnerable",
	[RETBLEED_MITIGATION_UNRET]	= "Mitigation: untrained return thunk",
	[RETBLEED_MITIGATION_IBPB]	= "Mitigation: IBPB",
	[RETBLEED_MITIGATION_IBRS]	= "Mitigation: IBRS",
	[RETBLEED_MITIGATION_EIBRS]	= "Mitigation: Enhanced IBRS",
	[RETBLEED_MITIGATION_STUFF]	= "Mitigation: Stuffing",
};

static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_RETBLEED) ? RETBLEED_MITIGATION_AUTO : RETBLEED_MITIGATION_NONE;

static int __ro_after_init retbleed_nosmt = false;

static int __init retbleed_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

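	/*
	 * Options may be given as a comma-separated list, e.g.
	 * "retbleed=ibpb,nosmt" (illustrative); each token is handled
	 * independently below.
	 */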
	while (str) {
		char *next = strchr(str, ',');
		if (next) {
			*next = 0;
			next++;
		}

		if (!strcmp(str, "off")) {
			retbleed_mitigation = RETBLEED_MITIGATION_NONE;
		} else if (!strcmp(str, "auto")) {
			retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
		} else if (!strcmp(str, "unret")) {
			retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
		} else if (!strcmp(str, "ibpb")) {
			retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
		} else if (!strcmp(str, "stuff")) {
			retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
		} else if (!strcmp(str, "nosmt")) {
			retbleed_nosmt = true;
		} else if (!strcmp(str, "force")) {
			setup_force_cpu_bug(X86_BUG_RETBLEED);
		} else {
			pr_err("Ignoring unknown retbleed option (%s).", str);
		}

		str = next;
	}

	return 0;
}
early_param("retbleed", retbleed_parse_cmdline);

#define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
#define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"

static void __init retbleed_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off()) {
		retbleed_mitigation = RETBLEED_MITIGATION_NONE;
		return;
	}

	switch (retbleed_mitigation) {
	case RETBLEED_MITIGATION_UNRET:
		if (!IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY)) {
			retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
			pr_err("WARNING: kernel not compiled with MITIGATION_UNRET_ENTRY.\n");
		}
		break;
	case RETBLEED_MITIGATION_IBPB:
		if (!boot_cpu_has(X86_FEATURE_IBPB)) {
			pr_err("WARNING: CPU does not support IBPB.\n");
			retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
		} else if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
			pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
			retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
		}
		break;
	case RETBLEED_MITIGATION_STUFF:
		if (!IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) {
			pr_err("WARNING: kernel not compiled with MITIGATION_CALL_DEPTH_TRACKING.\n");
			retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
		} else if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
			pr_err("WARNING: retbleed=stuff only supported for Intel CPUs.\n");
			retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
		}
		break;
	default:
		break;
	}

	if (retbleed_mitigation != RETBLEED_MITIGATION_AUTO)
		return;

	/* Intel mitigation selected in retbleed_update_mitigation() */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
	    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
		if (IS_ENABLED(CONFIG_MITIGATION_UNRET_ENTRY))
			retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
		else if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY) &&
			 boot_cpu_has(X86_FEATURE_IBPB))
			retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
		else
			retbleed_mitigation = RETBLEED_MITIGATION_NONE;
	}
}

static void __init retbleed_update_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
		return;

	if (retbleed_mitigation == RETBLEED_MITIGATION_NONE)
		goto out;

	/*
	 * retbleed=stuff is only allowed on Intel. If stuffing can't be used
	 * then a different mitigation will be selected below.
	 *
	 * its=stuff will also attempt to enable stuffing.
	 */
	if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF ||
	    its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF) {
		if (spectre_v2_enabled != SPECTRE_V2_RETPOLINE) {
			pr_err("WARNING: retbleed=stuff depends on spectre_v2=retpoline\n");
			retbleed_mitigation = RETBLEED_MITIGATION_AUTO;
		} else {
			if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
				pr_info("Retbleed mitigation updated to stuffing\n");

			retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
		}
	}
	/*
	 * Let IBRS trump all on Intel without affecting the effects of the
	 * retbleed= cmdline option except for call depth based stuffing
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
		switch (spectre_v2_enabled) {
		case SPECTRE_V2_IBRS:
			retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
			break;
		case SPECTRE_V2_EIBRS:
		case SPECTRE_V2_EIBRS_RETPOLINE:
		case SPECTRE_V2_EIBRS_LFENCE:
			retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
			break;
		default:
			if (retbleed_mitigation != RETBLEED_MITIGATION_STUFF)
				pr_err(RETBLEED_INTEL_MSG);
		}
		/* If nothing has set the mitigation yet, default to NONE. */
		if (retbleed_mitigation == RETBLEED_MITIGATION_AUTO)
			retbleed_mitigation = RETBLEED_MITIGATION_NONE;
	}
out:
	pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
}

static void __init retbleed_apply_mitigation(void)
{
	bool mitigate_smt = false;

	switch (retbleed_mitigation) {
	case RETBLEED_MITIGATION_NONE:
		return;

	case RETBLEED_MITIGATION_UNRET:
		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
		setup_force_cpu_cap(X86_FEATURE_UNRET);

		set_return_thunk(retbleed_return_thunk);

		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
			pr_err(RETBLEED_UNTRAIN_MSG);

		mitigate_smt = true;
		break;

	case RETBLEED_MITIGATION_IBPB:
		setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
		setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
		mitigate_smt = true;

		/*
		 * IBPB on entry already obviates the need for
		 * software-based untraining so clear those in case some
		 * other mitigation like SRSO has selected them.
		 */
		setup_clear_cpu_cap(X86_FEATURE_UNRET);
		setup_clear_cpu_cap(X86_FEATURE_RETHUNK);

		/*
		 * There is no need for RSB filling: write_ibpb() ensures
		 * all predictions, including the RSB, are invalidated,
		 * regardless of IBPB implementation.
		 */
		setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);

		break;

	case RETBLEED_MITIGATION_STUFF:
		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
		setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);

		set_return_thunk(call_depth_return_thunk);
		break;

	default:
		break;
	}

	if (mitigate_smt && !boot_cpu_has(X86_FEATURE_STIBP) &&
	    (retbleed_nosmt || cpu_mitigations_auto_nosmt()))
		cpu_smt_disable(false);
}

#undef pr_fmt
#define pr_fmt(fmt)	"ITS: " fmt

static const char * const its_strings[] = {
	[ITS_MITIGATION_OFF]			= "Vulnerable",
	[ITS_MITIGATION_VMEXIT_ONLY]		= "Mitigation: Vulnerable, KVM: Not affected",
	[ITS_MITIGATION_ALIGNED_THUNKS]		= "Mitigation: Aligned branch/return thunks",
	[ITS_MITIGATION_RETPOLINE_STUFF]	= "Mitigation: Retpolines, Stuffing RSB",
};

f4818881 PG |
1379 | static int __init its_parse_cmdline(char *str) |
1380 | { | |
1381 | if (!str) | |
1382 | return -EINVAL; | |
1383 | ||
1384 | if (!IS_ENABLED(CONFIG_MITIGATION_ITS)) { | |
1385 | pr_err("Mitigation disabled at compile time, ignoring option (%s)", str); | |
1386 | return 0; | |
1387 | } | |
1388 | ||
1389 | if (!strcmp(str, "off")) { | |
61ab72c2 | 1390 | its_mitigation = ITS_MITIGATION_OFF; |
f4818881 | 1391 | } else if (!strcmp(str, "on")) { |
61ab72c2 | 1392 | its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS; |
f4818881 | 1393 | } else if (!strcmp(str, "force")) { |
61ab72c2 | 1394 | its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS; |
f4818881 | 1395 | setup_force_cpu_bug(X86_BUG_ITS); |
2665281a | 1396 | } else if (!strcmp(str, "vmexit")) { |
61ab72c2 | 1397 | its_mitigation = ITS_MITIGATION_VMEXIT_ONLY; |
facd226f | 1398 | } else if (!strcmp(str, "stuff")) { |
61ab72c2 | 1399 | its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF; |
f4818881 PG |
1400 | } else { |
1401 | pr_err("Ignoring unknown indirect_target_selection option (%s).", str); | |
1402 | } | |
1403 | ||
1404 | return 0; | |
1405 | } | |
1406 | early_param("indirect_target_selection", its_parse_cmdline); | |
1407 | ||
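For example, booting with indirect_target_selection=stuff requests the retpoline + RSB-stuffing variant, while indirect_target_selection=vmexit is honored only on CPUs flagged X86_BUG_ITS_NATIVE_ONLY (otherwise its_select_mitigation() below falls back to aligned thunks); unrecognized values are logged and ignored, as the strcmp() chain above shows.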
1408 | static void __init its_select_mitigation(void) | |
1409 | { | |
f4818881 PG |
1410 | if (!boot_cpu_has_bug(X86_BUG_ITS) || cpu_mitigations_off()) { |
1411 | its_mitigation = ITS_MITIGATION_OFF; | |
1412 | return; | |
1413 | } | |
1414 | ||
61ab72c2 DK |
1415 | if (its_mitigation == ITS_MITIGATION_AUTO) |
1416 | its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS; | |
1417 | ||
1418 | if (its_mitigation == ITS_MITIGATION_OFF) | |
1419 | return; | |
f4818881 | 1420 | |
f4818881 PG |
1421 | if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) || |
1422 | !IS_ENABLED(CONFIG_MITIGATION_RETHUNK)) { | |
1423 | pr_err("WARNING: ITS mitigation depends on retpoline and rethunk support\n"); | |
1424 | its_mitigation = ITS_MITIGATION_OFF; | |
61ab72c2 | 1425 | return; |
f4818881 | 1426 | } |
61ab72c2 | 1427 | |
f4818881 PG |
1428 | if (IS_ENABLED(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)) { |
1429 | pr_err("WARNING: ITS mitigation is not compatible with CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B\n"); | |
1430 | its_mitigation = ITS_MITIGATION_OFF; | |
61ab72c2 | 1431 | return; |
f4818881 PG |
1432 | } |
1433 | ||
61ab72c2 DK |
1434 | if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF && |
1435 | !IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)) { | |
facd226f | 1436 | pr_err("RSB stuff mitigation not supported, using default\n"); |
61ab72c2 | 1437 | its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS; |
facd226f PG |
1438 | } |
1439 | ||
61ab72c2 DK |
1440 | if (its_mitigation == ITS_MITIGATION_VMEXIT_ONLY && |
1441 | !boot_cpu_has_bug(X86_BUG_ITS_NATIVE_ONLY)) | |
1442 | its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS; | |
1443 | } | |
1444 | ||
1445 | static void __init its_update_mitigation(void) | |
1446 | { | |
1447 | if (!boot_cpu_has_bug(X86_BUG_ITS) || cpu_mitigations_off()) | |
1448 | return; | |
1449 | ||
1450 | switch (spectre_v2_enabled) { | |
1451 | case SPECTRE_V2_NONE: | |
1452 | pr_err("WARNING: Spectre-v2 mitigation is off, disabling ITS\n"); | |
f4818881 PG |
1453 | its_mitigation = ITS_MITIGATION_OFF; |
1454 | break; | |
61ab72c2 DK |
1455 | case SPECTRE_V2_RETPOLINE: |
1456 | /* Retpoline + call depth tracking (CDT) mitigates ITS */ |
1457 | if (retbleed_mitigation == RETBLEED_MITIGATION_STUFF) | |
1458 | its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF; | |
f4818881 | 1459 | break; |
61ab72c2 DK |
1460 | case SPECTRE_V2_LFENCE: |
1461 | case SPECTRE_V2_EIBRS_LFENCE: | |
1462 | pr_err("WARNING: ITS mitigation is not compatible with lfence mitigation\n"); | |
1463 | its_mitigation = ITS_MITIGATION_OFF; | |
1464 | break; | |
1465 | default: | |
facd226f | 1466 | break; |
f4818881 | 1467 | } |
61ab72c2 DK |
1468 | |
1469 | /* | |
1470 | * retbleed_update_mitigation() will try to do stuffing if its=stuff. | |
1471 | * If it can't (e.g. because spectre_v2 != retpoline), fall back to |
1472 | * aligned thunks. | |
1473 | */ | |
1474 | if (its_mitigation == ITS_MITIGATION_RETPOLINE_STUFF && | |
1475 | retbleed_mitigation != RETBLEED_MITIGATION_STUFF) | |
1476 | its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS; | |
1477 | ||
f4818881 PG |
1478 | pr_info("%s\n", its_strings[its_mitigation]); |
1479 | } | |
1480 | ||
61ab72c2 DK |
1481 | static void __init its_apply_mitigation(void) |
1482 | { | |
1483 | /* its=stuff forces retbleed stuffing, which retbleed_apply_mitigation() enables. */ |
1484 | if (its_mitigation != ITS_MITIGATION_ALIGNED_THUNKS) | |
1485 | return; | |
1486 | ||
1487 | if (!boot_cpu_has(X86_FEATURE_RETPOLINE)) | |
1488 | setup_force_cpu_cap(X86_FEATURE_INDIRECT_THUNK_ITS); | |
1489 | ||
1490 | setup_force_cpu_cap(X86_FEATURE_RETHUNK); | |
1491 | set_return_thunk(its_return_thunk); | |
1492 | } | |
1493 | ||
d8010d4b BPA |
1494 | #undef pr_fmt |
1495 | #define pr_fmt(fmt) "Transient Scheduler Attacks: " fmt | |
1496 | ||
1497 | enum tsa_mitigations { | |
1498 | TSA_MITIGATION_NONE, | |
1499 | TSA_MITIGATION_AUTO, | |
1500 | TSA_MITIGATION_UCODE_NEEDED, | |
1501 | TSA_MITIGATION_USER_KERNEL, | |
1502 | TSA_MITIGATION_VM, | |
1503 | TSA_MITIGATION_FULL, | |
1504 | }; | |
1505 | ||
1506 | static const char * const tsa_strings[] = { | |
1507 | [TSA_MITIGATION_NONE] = "Vulnerable", | |
1508 | [TSA_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", | |
1509 | [TSA_MITIGATION_USER_KERNEL] = "Mitigation: Clear CPU buffers: user/kernel boundary", | |
1510 | [TSA_MITIGATION_VM] = "Mitigation: Clear CPU buffers: VM", | |
1511 | [TSA_MITIGATION_FULL] = "Mitigation: Clear CPU buffers", | |
1512 | }; | |
1513 | ||
1514 | static enum tsa_mitigations tsa_mitigation __ro_after_init = | |
1515 | IS_ENABLED(CONFIG_MITIGATION_TSA) ? TSA_MITIGATION_AUTO : TSA_MITIGATION_NONE; | |
1516 | ||
1517 | static int __init tsa_parse_cmdline(char *str) | |
1518 | { | |
1519 | if (!str) | |
1520 | return -EINVAL; | |
1521 | ||
1522 | if (!strcmp(str, "off")) | |
1523 | tsa_mitigation = TSA_MITIGATION_NONE; | |
1524 | else if (!strcmp(str, "on")) | |
1525 | tsa_mitigation = TSA_MITIGATION_FULL; | |
1526 | else if (!strcmp(str, "user")) | |
1527 | tsa_mitigation = TSA_MITIGATION_USER_KERNEL; | |
1528 | else if (!strcmp(str, "vm")) | |
1529 | tsa_mitigation = TSA_MITIGATION_VM; | |
1530 | else | |
1531 | pr_err("Ignoring unknown tsa=%s option.\n", str); | |
1532 | ||
1533 | return 0; | |
1534 | } | |
1535 | early_param("tsa", tsa_parse_cmdline); | |
1536 | ||
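For example, tsa=user confines the VERW-based buffer clearing to the user/kernel boundary and tsa=vm to the VM boundary; with no option given, the AUTO default is resolved by tsa_select_mitigation() below.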
1537 | static void __init tsa_select_mitigation(void) | |
1538 | { | |
1539 | if (cpu_mitigations_off() || !boot_cpu_has_bug(X86_BUG_TSA)) { | |
1540 | tsa_mitigation = TSA_MITIGATION_NONE; | |
1541 | return; | |
1542 | } | |
1543 | ||
1544 | if (tsa_mitigation == TSA_MITIGATION_NONE) | |
1545 | return; | |
1546 | ||
1547 | if (!boot_cpu_has(X86_FEATURE_VERW_CLEAR)) { | |
1548 | tsa_mitigation = TSA_MITIGATION_UCODE_NEEDED; | |
1549 | goto out; | |
1550 | } | |
1551 | ||
1552 | if (tsa_mitigation == TSA_MITIGATION_AUTO) | |
1553 | tsa_mitigation = TSA_MITIGATION_FULL; | |
1554 | ||
1555 | /* | |
1556 | * No need to set verw_clear_cpu_buf_mitigation_selected: it |
1557 | * doesn't fit all the cases here, and it is not needed because |
1558 | * this is the only VERW-based mitigation on AMD. |
1559 | */ | |
1560 | out: | |
1561 | pr_info("%s\n", tsa_strings[tsa_mitigation]); | |
1562 | } | |
1563 | ||
1564 | static void __init tsa_apply_mitigation(void) | |
1565 | { | |
1566 | switch (tsa_mitigation) { | |
1567 | case TSA_MITIGATION_USER_KERNEL: | |
1568 | setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); | |
1569 | break; | |
1570 | case TSA_MITIGATION_VM: | |
1571 | setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM); | |
1572 | break; | |
1573 | case TSA_MITIGATION_FULL: | |
1574 | setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF); | |
1575 | setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM); | |
1576 | break; | |
1577 | default: | |
1578 | break; | |
1579 | } | |
1580 | } | |
1581 | ||
15d6b7aa TG |
1582 | #undef pr_fmt |
1583 | #define pr_fmt(fmt) "Spectre V2 : " fmt | |
1584 | ||
21998a35 AS |
1585 | static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init = |
1586 | SPECTRE_V2_USER_NONE; | |
1587 | static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init = | |
fa1202ef TG |
1588 | SPECTRE_V2_USER_NONE; |
1589 | ||
aefb2f2e | 1590 | #ifdef CONFIG_MITIGATION_RETPOLINE |
e383095c TG |
1591 | static bool spectre_v2_bad_module; |
1592 | ||
caf7501a AK |
1593 | bool retpoline_module_ok(bool has_retpoline) |
1594 | { | |
1595 | if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline) | |
1596 | return true; | |
1597 | ||
e698dcdf | 1598 | pr_err("System may be vulnerable to spectre v2\n"); |
caf7501a AK |
1599 | spectre_v2_bad_module = true; |
1600 | return false; | |
1601 | } | |
e383095c TG |
1602 | |
1603 | static inline const char *spectre_v2_module_string(void) | |
1604 | { | |
1605 | return spectre_v2_bad_module ? " - vulnerable module loaded" : ""; | |
1606 | } | |
1607 | #else | |
1608 | static inline const char *spectre_v2_module_string(void) { return ""; } | |
caf7501a | 1609 | #endif |
da285121 | 1610 | |
eafd987d | 1611 | #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n" |
44a3918c | 1612 | #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n" |
0de05d05 | 1613 | #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n" |
eb23b5ef | 1614 | #define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n" |
44a3918c JP |
1615 | |
1616 | #ifdef CONFIG_BPF_SYSCALL | |
1617 | void unpriv_ebpf_notify(int new_state) | |
1618 | { | |
0de05d05 JP |
1619 | if (new_state) |
1620 | return; | |
1621 | ||
1622 | /* Unprivileged eBPF is enabled */ | |
1623 | ||
1624 | switch (spectre_v2_enabled) { | |
1625 | case SPECTRE_V2_EIBRS: | |
44a3918c | 1626 | pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); |
0de05d05 JP |
1627 | break; |
1628 | case SPECTRE_V2_EIBRS_LFENCE: | |
1629 | if (sched_smt_active()) | |
1630 | pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG); | |
1631 | break; | |
1632 | default: | |
1633 | break; | |
1634 | } | |
44a3918c JP |
1635 | } |
1636 | #endif | |
1637 | ||
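The eIBRS + eBPF warnings above hinge on whether unprivileged eBPF is enabled, which user space can check through the kernel.unprivileged_bpf_disabled sysctl. A small sketch; the /proc path and its 0/1/2 semantics are the standard sysctl interface:

/*
 * Sketch: read the sysctl that unpriv_ebpf_notify() reacts to.
 * 0: unprivileged eBPF enabled, 1: disabled, 2: disabled and
 * cannot be re-enabled until reboot.
 */
#include <stdio.h>

int main(void)
{
	int val;
	FILE *f = fopen("/proc/sys/kernel/unprivileged_bpf_disabled", "r");

	if (!f || fscanf(f, "%d", &val) != 1) {
		perror("unprivileged_bpf_disabled");
		return 1;
	}
	printf("unprivileged eBPF is %s\n", val == 0 ? "enabled" : "disabled");
	fclose(f);
	return 0;
}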
da285121 DW |
1638 | static inline bool match_option(const char *arg, int arglen, const char *opt) |
1639 | { | |
1640 | int len = strlen(opt); | |
1641 | ||
1642 | return len == arglen && !strncmp(arg, opt, len); | |
1643 | } | |
1644 | ||
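match_option() compares a length-delimited token, as returned by cmdline_find_option(), against a NUL-terminated option name; the length check stops a prefix such as "retpoline" from matching "retpoline,generic". A stand-alone user-space sketch of the same helper with a tiny test harness (plain C library only):

/* Stand-alone sketch of match_option() with a small test. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* arg is a length-delimited token (arglen bytes), opt is NUL-terminated. */
static bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}

int main(void)
{
	const char *cmdline = "retpoline,generic";

	/* A 9-byte token matches only the exact 9-byte option. */
	printf("%d\n", match_option(cmdline, 9, "retpoline"));         /* 1 */
	printf("%d\n", match_option(cmdline, 9, "retpoline,generic")); /* 0 */
	return 0;
}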
15d6b7aa TG |
1645 | /* The kernel command line selection for spectre v2 */ |
1646 | enum spectre_v2_mitigation_cmd { | |
1647 | SPECTRE_V2_CMD_NONE, | |
1648 | SPECTRE_V2_CMD_AUTO, | |
1649 | SPECTRE_V2_CMD_FORCE, | |
1650 | SPECTRE_V2_CMD_RETPOLINE, | |
1651 | SPECTRE_V2_CMD_RETPOLINE_GENERIC, | |
d45476d9 | 1652 | SPECTRE_V2_CMD_RETPOLINE_LFENCE, |
1e19da85 PZ |
1653 | SPECTRE_V2_CMD_EIBRS, |
1654 | SPECTRE_V2_CMD_EIBRS_RETPOLINE, | |
1655 | SPECTRE_V2_CMD_EIBRS_LFENCE, | |
7c693f54 | 1656 | SPECTRE_V2_CMD_IBRS, |
15d6b7aa TG |
1657 | }; |
1658 | ||
ddfca943 DK |
1659 | static enum spectre_v2_mitigation_cmd spectre_v2_cmd __ro_after_init = SPECTRE_V2_CMD_AUTO; |
1660 | ||
fa1202ef TG |
1661 | enum spectre_v2_user_cmd { |
1662 | SPECTRE_V2_USER_CMD_NONE, | |
1663 | SPECTRE_V2_USER_CMD_AUTO, | |
1664 | SPECTRE_V2_USER_CMD_FORCE, | |
7cc765a6 | 1665 | SPECTRE_V2_USER_CMD_PRCTL, |
55a97402 | 1666 | SPECTRE_V2_USER_CMD_PRCTL_IBPB, |
6b3e64c2 | 1667 | SPECTRE_V2_USER_CMD_SECCOMP, |
55a97402 | 1668 | SPECTRE_V2_USER_CMD_SECCOMP_IBPB, |
fa1202ef TG |
1669 | }; |
1670 | ||
1671 | static const char * const spectre_v2_user_strings[] = { | |
20c3a2c3 TL |
1672 | [SPECTRE_V2_USER_NONE] = "User space: Vulnerable", |
1673 | [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection", | |
1674 | [SPECTRE_V2_USER_STRICT_PREFERRED] = "User space: Mitigation: STIBP always-on protection", | |
1675 | [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl", | |
1676 | [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl", | |
fa1202ef TG |
1677 | }; |
1678 | ||
1679 | static const struct { | |
1680 | const char *option; | |
1681 | enum spectre_v2_user_cmd cmd; | |
1682 | bool secure; | |
1de7edbb | 1683 | } v2_user_options[] __initconst = { |
55a97402 TG |
1684 | { "auto", SPECTRE_V2_USER_CMD_AUTO, false }, |
1685 | { "off", SPECTRE_V2_USER_CMD_NONE, false }, | |
1686 | { "on", SPECTRE_V2_USER_CMD_FORCE, true }, | |
1687 | { "prctl", SPECTRE_V2_USER_CMD_PRCTL, false }, | |
1688 | { "prctl,ibpb", SPECTRE_V2_USER_CMD_PRCTL_IBPB, false }, | |
1689 | { "seccomp", SPECTRE_V2_USER_CMD_SECCOMP, false }, | |
1690 | { "seccomp,ibpb", SPECTRE_V2_USER_CMD_SECCOMP_IBPB, false }, | |
fa1202ef TG |
1691 | }; |
1692 | ||
1693 | static void __init spec_v2_user_print_cond(const char *reason, bool secure) | |
1694 | { | |
1695 | if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure) | |
1696 | pr_info("spectre_v2_user=%s forced on command line.\n", reason); | |
1697 | } | |
1698 | ||
ddfca943 | 1699 | static enum spectre_v2_user_cmd __init spectre_v2_parse_user_cmdline(void) |
fa1202ef TG |
1700 | { |
1701 | char arg[20]; | |
1702 | int ret, i; | |
1703 | ||
ddfca943 | 1704 | if (cpu_mitigations_off() || !IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2)) |
fa1202ef | 1705 | return SPECTRE_V2_USER_CMD_NONE; |
fa1202ef TG |
1706 | |
1707 | ret = cmdline_find_option(boot_command_line, "spectre_v2_user", | |
1708 | arg, sizeof(arg)); | |
1709 | if (ret < 0) | |
ddfca943 | 1710 | return SPECTRE_V2_USER_CMD_AUTO; |
fa1202ef TG |
1711 | |
1712 | for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) { | |
1713 | if (match_option(arg, ret, v2_user_options[i].option)) { | |
1714 | spec_v2_user_print_cond(v2_user_options[i].option, | |
1715 | v2_user_options[i].secure); | |
1716 | return v2_user_options[i].cmd; | |
1717 | } | |
1718 | } | |
1719 | ||
98fdaeb2 | 1720 | pr_err("Unknown user space protection option (%s). Switching to default\n", arg); |
ddfca943 | 1721 | return SPECTRE_V2_USER_CMD_AUTO; |
fa1202ef TG |
1722 | } |
1723 | ||
6921ed90 KS |
1724 | static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode) |
1725 | { | |
1726 | return spectre_v2_in_eibrs_mode(mode) || mode == SPECTRE_V2_IBRS; | |
1727 | } | |
1728 | ||
ddfca943 | 1729 | static void __init spectre_v2_user_select_mitigation(void) |
fa1202ef | 1730 | { |
fa1202ef TG |
1731 | if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP)) |
1732 | return; | |
1733 | ||
ddfca943 | 1734 | switch (spectre_v2_parse_user_cmdline()) { |
fa1202ef | 1735 | case SPECTRE_V2_USER_CMD_NONE: |
ddfca943 | 1736 | return; |
fa1202ef | 1737 | case SPECTRE_V2_USER_CMD_FORCE: |
ddfca943 DK |
1738 | spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT; |
1739 | spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT; | |
fa1202ef | 1740 | break; |
2f46993d | 1741 | case SPECTRE_V2_USER_CMD_AUTO: |
7cc765a6 | 1742 | case SPECTRE_V2_USER_CMD_PRCTL: |
ddfca943 DK |
1743 | spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL; |
1744 | spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL; | |
1745 | break; | |
55a97402 | 1746 | case SPECTRE_V2_USER_CMD_PRCTL_IBPB: |
ddfca943 DK |
1747 | spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT; |
1748 | spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL; | |
7cc765a6 | 1749 | break; |
6b3e64c2 | 1750 | case SPECTRE_V2_USER_CMD_SECCOMP: |
ddfca943 DK |
1751 | if (IS_ENABLED(CONFIG_SECCOMP)) |
1752 | spectre_v2_user_ibpb = SPECTRE_V2_USER_SECCOMP; | |
1753 | else | |
1754 | spectre_v2_user_ibpb = SPECTRE_V2_USER_PRCTL; | |
1755 | spectre_v2_user_stibp = spectre_v2_user_ibpb; | |
1756 | break; | |
55a97402 | 1757 | case SPECTRE_V2_USER_CMD_SECCOMP_IBPB: |
ddfca943 | 1758 | spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT; |
6b3e64c2 | 1759 | if (IS_ENABLED(CONFIG_SECCOMP)) |
ddfca943 | 1760 | spectre_v2_user_stibp = SPECTRE_V2_USER_SECCOMP; |
6b3e64c2 | 1761 | else |
ddfca943 | 1762 | spectre_v2_user_stibp = SPECTRE_V2_USER_PRCTL; |
6b3e64c2 | 1763 | break; |
fa1202ef TG |
1764 | } |
1765 | ||
ddfca943 DK |
1766 | /* |
1767 | * At this point, an STIBP mode other than "off" has been set. | |
1768 | * If STIBP support is not being forced, check if STIBP always-on | |
1769 | * is preferred. | |
1770 | */ | |
1771 | if ((spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL || | |
1772 | spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) && | |
1773 | boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON)) | |
1774 | spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED; | |
4c71a2b6 | 1775 | |
ddfca943 DK |
1776 | if (!boot_cpu_has(X86_FEATURE_IBPB)) |
1777 | spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE; | |
4c71a2b6 | 1778 | |
ddfca943 DK |
1779 | if (!boot_cpu_has(X86_FEATURE_STIBP)) |
1780 | spectre_v2_user_stibp = SPECTRE_V2_USER_NONE; | |
1781 | } | |
1782 | ||
1783 | static void __init spectre_v2_user_update_mitigation(void) | |
1784 | { | |
1785 | if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP)) | |
1786 | return; | |
1787 | ||
1788 | /* The spectre_v2 cmd line can override spectre_v2_user options */ | |
1789 | if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE) { | |
1790 | spectre_v2_user_ibpb = SPECTRE_V2_USER_NONE; | |
1791 | spectre_v2_user_stibp = SPECTRE_V2_USER_NONE; | |
1792 | } else if (spectre_v2_cmd == SPECTRE_V2_CMD_FORCE) { | |
1793 | spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT; | |
1794 | spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT; | |
fa1202ef TG |
1795 | } |
1796 | ||
21998a35 | 1797 | /* |
fd470a8b | 1798 | * If there is no STIBP, Intel Enhanced IBRS is enabled, or SMT is |
6921ed90 KS |
1799 | * impossible, then STIBP is not required. |
1800 | * | |
fd470a8b | 1801 | * Intel's Enhanced IBRS also protects against cross-thread branch target |
6921ed90 KS |
1802 | * injection in user-mode, as the IBRS bit remains always set, which |
1803 | * implicitly enables cross-thread protections. However, in legacy IBRS | |
1804 | * mode, the IBRS bit is set only on kernel entry and cleared on return | |
fd470a8b KP |
1805 | * to userspace. AMD Automatic IBRS also does not protect userspace. |
1806 | * These modes therefore disable the implicit cross-thread protection, | |
1807 | * so allow for STIBP to be selected in those cases. | |
21998a35 | 1808 | */ |
a5ce9f2b | 1809 | if (!boot_cpu_has(X86_FEATURE_STIBP) || |
2a08b832 | 1810 | !cpu_smt_possible() || |
fd470a8b | 1811 | (spectre_v2_in_eibrs_mode(spectre_v2_enabled) && |
ddfca943 DK |
1812 | !boot_cpu_has(X86_FEATURE_AUTOIBRS))) { |
1813 | spectre_v2_user_stibp = SPECTRE_V2_USER_NONE; | |
fa1202ef | 1814 | return; |
ddfca943 | 1815 | } |
fa1202ef | 1816 | |
ddfca943 DK |
1817 | if (spectre_v2_user_stibp != SPECTRE_V2_USER_NONE && |
1818 | (retbleed_mitigation == RETBLEED_MITIGATION_UNRET || | |
1819 | retbleed_mitigation == RETBLEED_MITIGATION_IBPB)) { | |
1820 | if (spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT && | |
1821 | spectre_v2_user_stibp != SPECTRE_V2_USER_STRICT_PREFERRED) | |
bcf16315 | 1822 | pr_info("Selecting STIBP always-on mode to complement retbleed mitigation\n"); |
ddfca943 | 1823 | spectre_v2_user_stibp = SPECTRE_V2_USER_STRICT_PREFERRED; |
e8ec1b6e | 1824 | } |
ddfca943 DK |
1825 | pr_info("%s\n", spectre_v2_user_strings[spectre_v2_user_stibp]); |
1826 | } | |
1827 | ||
1828 | static void __init spectre_v2_user_apply_mitigation(void) | |
1829 | { | |
1830 | /* Initialize Indirect Branch Prediction Barrier */ | |
1831 | if (spectre_v2_user_ibpb != SPECTRE_V2_USER_NONE) { | |
1832 | static_branch_enable(&switch_vcpu_ibpb); | |
e8ec1b6e | 1833 | |
ddfca943 DK |
1834 | switch (spectre_v2_user_ibpb) { |
1835 | case SPECTRE_V2_USER_STRICT: | |
1836 | static_branch_enable(&switch_mm_always_ibpb); | |
1837 | break; | |
1838 | case SPECTRE_V2_USER_PRCTL: | |
1839 | case SPECTRE_V2_USER_SECCOMP: | |
1840 | static_branch_enable(&switch_mm_cond_ibpb); | |
1841 | break; | |
1842 | default: | |
1843 | break; | |
1844 | } | |
21998a35 | 1845 | |
ddfca943 DK |
1846 | pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n", |
1847 | static_key_enabled(&switch_mm_always_ibpb) ? | |
1848 | "always-on" : "conditional"); | |
1849 | } | |
fa1202ef TG |
1850 | } |
1851 | ||
8770709f | 1852 | static const char * const spectre_v2_strings[] = { |
15d6b7aa | 1853 | [SPECTRE_V2_NONE] = "Vulnerable", |
d45476d9 PZI |
1854 | [SPECTRE_V2_RETPOLINE] = "Mitigation: Retpolines", |
1855 | [SPECTRE_V2_LFENCE] = "Mitigation: LFENCE", | |
e7862eda KP |
1856 | [SPECTRE_V2_EIBRS] = "Mitigation: Enhanced / Automatic IBRS", |
1857 | [SPECTRE_V2_EIBRS_LFENCE] = "Mitigation: Enhanced / Automatic IBRS + LFENCE", | |
1858 | [SPECTRE_V2_EIBRS_RETPOLINE] = "Mitigation: Enhanced / Automatic IBRS + Retpolines", | |
7c693f54 | 1859 | [SPECTRE_V2_IBRS] = "Mitigation: IBRS", |
15d6b7aa TG |
1860 | }; |
1861 | ||
9005c683 KA |
1862 | static const struct { |
1863 | const char *option; | |
1864 | enum spectre_v2_mitigation_cmd cmd; | |
1865 | bool secure; | |
1de7edbb | 1866 | } mitigation_options[] __initconst = { |
15d6b7aa TG |
1867 | { "off", SPECTRE_V2_CMD_NONE, false }, |
1868 | { "on", SPECTRE_V2_CMD_FORCE, true }, | |
1869 | { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false }, | |
d45476d9 PZI |
1870 | { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false }, |
1871 | { "retpoline,lfence", SPECTRE_V2_CMD_RETPOLINE_LFENCE, false }, | |
15d6b7aa | 1872 | { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false }, |
1e19da85 PZ |
1873 | { "eibrs", SPECTRE_V2_CMD_EIBRS, false }, |
1874 | { "eibrs,lfence", SPECTRE_V2_CMD_EIBRS_LFENCE, false }, | |
1875 | { "eibrs,retpoline", SPECTRE_V2_CMD_EIBRS_RETPOLINE, false }, | |
15d6b7aa | 1876 | { "auto", SPECTRE_V2_CMD_AUTO, false }, |
7c693f54 | 1877 | { "ibrs", SPECTRE_V2_CMD_IBRS, false }, |
9005c683 KA |
1878 | }; |
1879 | ||
495d470e | 1880 | static void __init spec_v2_print_cond(const char *reason, bool secure) |
15d6b7aa | 1881 | { |
495d470e | 1882 | if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure) |
15d6b7aa TG |
1883 | pr_info("%s selected on command line.\n", reason); |
1884 | } | |
1885 | ||
da285121 DW |
1886 | static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) |
1887 | { | |
72c70f48 | 1888 | enum spectre_v2_mitigation_cmd cmd; |
da285121 | 1889 | char arg[20]; |
9005c683 | 1890 | int ret, i; |
9005c683 | 1891 | |
72c70f48 | 1892 | cmd = IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ? SPECTRE_V2_CMD_AUTO : SPECTRE_V2_CMD_NONE; |
d68be4c4 JP |
1893 | if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") || |
1894 | cpu_mitigations_off()) | |
9005c683 | 1895 | return SPECTRE_V2_CMD_NONE; |
9005c683 | 1896 | |
24848509 TC |
1897 | ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg)); |
1898 | if (ret < 0) | |
72c70f48 | 1899 | return cmd; |
24848509 TC |
1900 | |
1901 | for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) { | |
1902 | if (!match_option(arg, ret, mitigation_options[i].option)) | |
1903 | continue; | |
1904 | cmd = mitigation_options[i].cmd; | |
1905 | break; | |
1906 | } | |
1907 | ||
1908 | if (i >= ARRAY_SIZE(mitigation_options)) { | |
72c70f48 BL |
1909 | pr_err("unknown option (%s). Switching to default mode\n", arg); |
1910 | return cmd; | |
da285121 DW |
1911 | } |
1912 | ||
9005c683 | 1913 | if ((cmd == SPECTRE_V2_CMD_RETPOLINE || |
d45476d9 | 1914 | cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE || |
1e19da85 PZ |
1915 | cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC || |
1916 | cmd == SPECTRE_V2_CMD_EIBRS_LFENCE || | |
1917 | cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) && | |
aefb2f2e | 1918 | !IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) { |
1e19da85 PZ |
1919 | pr_err("%s selected but not compiled in. Switching to AUTO select\n", |
1920 | mitigation_options[i].option); | |
1921 | return SPECTRE_V2_CMD_AUTO; | |
1922 | } | |
1923 | ||
1924 | if ((cmd == SPECTRE_V2_CMD_EIBRS || | |
1925 | cmd == SPECTRE_V2_CMD_EIBRS_LFENCE || | |
1926 | cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) && | |
1927 | !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) { | |
e7862eda | 1928 | pr_err("%s selected but CPU doesn't have Enhanced or Automatic IBRS. Switching to AUTO select\n", |
1e19da85 | 1929 | mitigation_options[i].option); |
da285121 | 1930 | return SPECTRE_V2_CMD_AUTO; |
9005c683 KA |
1931 | } |
1932 | ||
1e19da85 PZ |
1933 | if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE || |
1934 | cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) && | |
d45476d9 | 1935 | !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) { |
1e19da85 PZ |
1936 | pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n", |
1937 | mitigation_options[i].option); | |
d45476d9 PZI |
1938 | return SPECTRE_V2_CMD_AUTO; |
1939 | } | |
1940 | ||
1da8d217 | 1941 | if (cmd == SPECTRE_V2_CMD_IBRS && !IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY)) { |
f43b9876 PZ |
1942 | pr_err("%s selected but not compiled in. Switching to AUTO select\n", |
1943 | mitigation_options[i].option); | |
1944 | return SPECTRE_V2_CMD_AUTO; | |
1945 | } | |
1946 | ||
7c693f54 PG |
1947 | if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) { |
1948 | pr_err("%s selected but not Intel CPU. Switching to AUTO select\n", | |
1949 | mitigation_options[i].option); | |
1950 | return SPECTRE_V2_CMD_AUTO; | |
1951 | } | |
1952 | ||
1953 | if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) { | |
1954 | pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n", | |
1955 | mitigation_options[i].option); | |
1956 | return SPECTRE_V2_CMD_AUTO; | |
1957 | } | |
1958 | ||
6007878a | 1959 | if (cmd == SPECTRE_V2_CMD_IBRS && cpu_feature_enabled(X86_FEATURE_XENPV)) { |
7c693f54 PG |
1960 | pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n", |
1961 | mitigation_options[i].option); | |
1962 | return SPECTRE_V2_CMD_AUTO; | |
1963 | } | |
1964 | ||
495d470e TG |
1965 | spec_v2_print_cond(mitigation_options[i].option, |
1966 | mitigation_options[i].secure); | |
9005c683 | 1967 | return cmd; |
da285121 DW |
1968 | } |
1969 | ||
1e19da85 PZ |
1970 | static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void) |
1971 | { | |
aefb2f2e | 1972 | if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE)) { |
1e19da85 PZ |
1973 | pr_err("Kernel not compiled with retpoline; no mitigation available!"); |
1974 | return SPECTRE_V2_NONE; | |
1975 | } | |
1976 | ||
1e19da85 PZ |
1977 | return SPECTRE_V2_RETPOLINE; |
1978 | } | |
1979 | ||
1cea8a28 JP |
1980 | static bool __ro_after_init rrsba_disabled; |
1981 | ||
4ad3278d PG |
1982 | /* Disable in-kernel use of non-RSB RET predictors */ |
1983 | static void __init spec_ctrl_disable_kernel_rrsba(void) | |
1984 | { | |
1cea8a28 JP |
1985 | if (rrsba_disabled) |
1986 | return; | |
4ad3278d | 1987 | |
1cea8a28 JP |
1988 | if (!(x86_arch_cap_msr & ARCH_CAP_RRSBA)) { |
1989 | rrsba_disabled = true; | |
4ad3278d | 1990 | return; |
1cea8a28 | 1991 | } |
4ad3278d | 1992 | |
1cea8a28 JP |
1993 | if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL)) |
1994 | return; | |
4ad3278d | 1995 | |
1cea8a28 JP |
1996 | x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S; |
1997 | update_spec_ctrl(x86_spec_ctrl_base); | |
1998 | rrsba_disabled = true; | |
4ad3278d PG |
1999 | } |
2000 | ||
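RRSBA_DIS_S lives in the IA32_SPEC_CTRL MSR that update_spec_ctrl() writes. A root-only user-space sketch that dumps the MSR through the msr character device; the MSR number (0x48) and the bit position (6) match current kernel headers but are stated here as assumptions, not ABI:

/*
 * Root-only sketch: dump IA32_SPEC_CTRL via /dev/cpu/0/msr (needs
 * the msr module loaded). MSR 0x48 and RRSBA_DIS_S at bit 6 are
 * taken from current kernel headers -- treat both as assumptions.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_IA32_SPEC_CTRL	0x48
#define SPEC_CTRL_RRSBA_DIS_S	(1ULL << 6)

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	/* The msr device reads 8 bytes at offset == MSR number. */
	if (fd < 0 || pread(fd, &val, sizeof(val), MSR_IA32_SPEC_CTRL) != sizeof(val)) {
		perror("msr");
		return 1;
	}
	printf("SPEC_CTRL=%#llx RRSBA_DIS_S=%d\n",
	       (unsigned long long)val, (int)!!(val & SPEC_CTRL_RRSBA_DIS_S));
	close(fd);
	return 0;
}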
27ce8299 | 2001 | static void __init spectre_v2_select_rsb_mitigation(enum spectre_v2_mitigation mode) |
2b129932 DS |
2002 | { |
2003 | /* | |
83f6665a JP |
2004 | * WARNING! There are many subtleties to consider when changing *any* |
2005 | * code related to RSB-related mitigations. Before doing so, carefully | |
2006 | * read the following document, and update if necessary: | |
2b129932 | 2007 | * |
83f6665a | 2008 | * Documentation/admin-guide/hw-vuln/rsb.rst |
2b129932 | 2009 | * |
83f6665a | 2010 | * In an overly simplified nutshell: |
2b129932 | 2011 | * |
83f6665a JP |
2012 | * - User->user RSB attacks are conditionally mitigated during |
2013 | * context switches by cond_mitigation -> write_ibpb(). | |
2b129932 | 2014 | * |
83f6665a JP |
2015 | * - User->kernel and guest->host attacks are mitigated by eIBRS or |
2016 | * RSB filling. | |
2b129932 | 2017 | * |
83f6665a JP |
2018 | * Though, depending on config, note that other alternative |
2019 | * mitigations may end up getting used instead, e.g., IBPB on | |
2020 | * entry/vmexit, call depth tracking, or return thunks. | |
2b129932 | 2021 | */ |
83f6665a | 2022 | |
2b129932 DS |
2023 | switch (mode) { |
2024 | case SPECTRE_V2_NONE: | |
27ce8299 | 2025 | break; |
2b129932 | 2026 | |
2b129932 | 2027 | case SPECTRE_V2_EIBRS: |
18bae0df JP |
2028 | case SPECTRE_V2_EIBRS_LFENCE: |
2029 | case SPECTRE_V2_EIBRS_RETPOLINE: | |
2b129932 | 2030 | if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { |
2b129932 | 2031 | pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n"); |
18bae0df | 2032 | setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE); |
2b129932 | 2033 | } |
27ce8299 | 2034 | break; |
2b129932 | 2035 | |
2b129932 DS |
2036 | case SPECTRE_V2_RETPOLINE: |
2037 | case SPECTRE_V2_LFENCE: | |
2038 | case SPECTRE_V2_IBRS: | |
27ce8299 JP |
2039 | pr_info("Spectre v2 / SpectreRSB: Filling RSB on context switch and VMEXIT\n"); |
2040 | setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); | |
18bae0df | 2041 | setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT); |
27ce8299 | 2042 | break; |
2b129932 | 2043 | |
27ce8299 JP |
2044 | default: |
2045 | pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation\n"); | |
2046 | dump_stack(); | |
2047 | break; | |
2048 | } | |
2b129932 DS |
2049 | } |
2050 | ||
ec9404e4 PG |
2051 | /* |
2052 | * Set BHI_DIS_S to prevent indirect branches in kernel to be influenced by | |
2053 | * branch history in userspace. Not needed if BHI_NO is set. | |
2054 | */ | |
2055 | static bool __init spec_ctrl_bhi_dis(void) | |
2056 | { | |
2057 | if (!boot_cpu_has(X86_FEATURE_BHI_CTRL)) | |
2058 | return false; | |
2059 | ||
2060 | x86_spec_ctrl_base |= SPEC_CTRL_BHI_DIS_S; | |
2061 | update_spec_ctrl(x86_spec_ctrl_base); | |
2062 | setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_HW); | |
2063 | ||
2064 | return true; | |
2065 | } | |
2066 | ||
2067 | enum bhi_mitigations { | |
2068 | BHI_MITIGATION_OFF, | |
efe31382 | 2069 | BHI_MITIGATION_AUTO, |
ec9404e4 | 2070 | BHI_MITIGATION_ON, |
42c141fb | 2071 | BHI_MITIGATION_VMEXIT_ONLY, |
ec9404e4 PG |
2072 | }; |
2073 | ||
2074 | static enum bhi_mitigations bhi_mitigation __ro_after_init = | |
efe31382 | 2075 | IS_ENABLED(CONFIG_MITIGATION_SPECTRE_BHI) ? BHI_MITIGATION_AUTO : BHI_MITIGATION_OFF; |
ec9404e4 PG |
2076 | |
2077 | static int __init spectre_bhi_parse_cmdline(char *str) | |
2078 | { | |
2079 | if (!str) | |
2080 | return -EINVAL; | |
2081 | ||
2082 | if (!strcmp(str, "off")) | |
2083 | bhi_mitigation = BHI_MITIGATION_OFF; | |
2084 | else if (!strcmp(str, "on")) | |
2085 | bhi_mitigation = BHI_MITIGATION_ON; | |
42c141fb JP |
2086 | else if (!strcmp(str, "vmexit")) |
2087 | bhi_mitigation = BHI_MITIGATION_VMEXIT_ONLY; | |
ec9404e4 PG |
2088 | else |
2089 | pr_err("Ignoring unknown spectre_bhi option (%s)", str); | |
2090 | ||
2091 | return 0; | |
2092 | } | |
2093 | early_param("spectre_bhi", spectre_bhi_parse_cmdline); | |
2094 | ||
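For example, spectre_bhi=vmexit restricts the software BHB-clearing sequence to VM exits (X86_FEATURE_CLEAR_BHB_VMEXIT in bhi_apply_mitigation() below, when the hardware BHI_DIS_S control is not available), while spectre_bhi=on clears on both syscall entry and VM exit.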
2095 | static void __init bhi_select_mitigation(void) | |
efe31382 DK |
2096 | { |
2097 | if (!boot_cpu_has(X86_BUG_BHI) || cpu_mitigations_off()) | |
2098 | bhi_mitigation = BHI_MITIGATION_OFF; | |
2099 | ||
2100 | if (bhi_mitigation == BHI_MITIGATION_AUTO) | |
2101 | bhi_mitigation = BHI_MITIGATION_ON; | |
2102 | } | |
2103 | ||
2104 | static void __init bhi_update_mitigation(void) | |
2105 | { | |
2106 | if (spectre_v2_cmd == SPECTRE_V2_CMD_NONE) | |
2107 | bhi_mitigation = BHI_MITIGATION_OFF; | |
2108 | ||
2109 | if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) && | |
2110 | spectre_v2_cmd == SPECTRE_V2_CMD_AUTO) | |
2111 | bhi_mitigation = BHI_MITIGATION_OFF; | |
2112 | } | |
2113 | ||
2114 | static void __init bhi_apply_mitigation(void) | |
ec9404e4 PG |
2115 | { |
2116 | if (bhi_mitigation == BHI_MITIGATION_OFF) | |
2117 | return; | |
2118 | ||
2119 | /* Retpoline mitigates BHI unless the CPU has RRSBA behavior */ |
69129794 JP |
2120 | if (boot_cpu_has(X86_FEATURE_RETPOLINE) && |
2121 | !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) { | |
1cea8a28 JP |
2122 | spec_ctrl_disable_kernel_rrsba(); |
2123 | if (rrsba_disabled) | |
2124 | return; | |
2125 | } | |
ec9404e4 | 2126 | |
073fdbe0 | 2127 | if (!IS_ENABLED(CONFIG_X86_64)) |
ec9404e4 PG |
2128 | return; |
2129 | ||
073fdbe0 PG |
2130 | /* Mitigate in hardware if supported */ |
2131 | if (spec_ctrl_bhi_dis()) | |
ec9404e4 PG |
2132 | return; |
2133 | ||
42c141fb JP |
2134 | if (bhi_mitigation == BHI_MITIGATION_VMEXIT_ONLY) { |
2135 | pr_info("Spectre BHI mitigation: SW BHB clearing on VM exit only\n"); | |
13327fad | 2136 | setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_VMEXIT); |
42c141fb JP |
2137 | return; |
2138 | } | |
95a6ccbd | 2139 | |
42c141fb | 2140 | pr_info("Spectre BHI mitigation: SW BHB clearing on syscall and VM exit\n"); |
ec9404e4 | 2141 | setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP); |
13327fad | 2142 | setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_VMEXIT); |
ec9404e4 PG |
2143 | } |
2144 | ||
da285121 DW |
2145 | static void __init spectre_v2_select_mitigation(void) |
2146 | { | |
480e803d | 2147 | spectre_v2_cmd = spectre_v2_parse_cmdline(); |
da285121 | 2148 | |
da285121 | 2149 | if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) && |
480e803d | 2150 | (spectre_v2_cmd == SPECTRE_V2_CMD_NONE || spectre_v2_cmd == SPECTRE_V2_CMD_AUTO)) |
da285121 DW |
2151 | return; |
2152 | ||
480e803d | 2153 | switch (spectre_v2_cmd) { |
da285121 DW |
2154 | case SPECTRE_V2_CMD_NONE: |
2155 | return; | |
2156 | ||
2157 | case SPECTRE_V2_CMD_FORCE: | |
da285121 | 2158 | case SPECTRE_V2_CMD_AUTO: |
706d5168 | 2159 | if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) { |
480e803d | 2160 | spectre_v2_enabled = SPECTRE_V2_EIBRS; |
1e19da85 | 2161 | break; |
706d5168 | 2162 | } |
1e19da85 | 2163 | |
480e803d | 2164 | spectre_v2_enabled = spectre_v2_select_retpoline(); |
9471eee9 | 2165 | break; |
1e19da85 | 2166 | |
d45476d9 | 2167 | case SPECTRE_V2_CMD_RETPOLINE_LFENCE: |
eafd987d | 2168 | pr_err(SPECTRE_V2_LFENCE_MSG); |
480e803d | 2169 | spectre_v2_enabled = SPECTRE_V2_LFENCE; |
da285121 | 2170 | break; |
1e19da85 | 2171 | |
da285121 | 2172 | case SPECTRE_V2_CMD_RETPOLINE_GENERIC: |
480e803d | 2173 | spectre_v2_enabled = SPECTRE_V2_RETPOLINE; |
da285121 | 2174 | break; |
1e19da85 | 2175 | |
da285121 | 2176 | case SPECTRE_V2_CMD_RETPOLINE: |
480e803d | 2177 | spectre_v2_enabled = spectre_v2_select_retpoline(); |
1e19da85 PZ |
2178 | break; |
2179 | ||
7c693f54 | 2180 | case SPECTRE_V2_CMD_IBRS: |
480e803d | 2181 | spectre_v2_enabled = SPECTRE_V2_IBRS; |
7c693f54 PG |
2182 | break; |
2183 | ||
1e19da85 | 2184 | case SPECTRE_V2_CMD_EIBRS: |
480e803d | 2185 | spectre_v2_enabled = SPECTRE_V2_EIBRS; |
1e19da85 PZ |
2186 | break; |
2187 | ||
2188 | case SPECTRE_V2_CMD_EIBRS_LFENCE: | |
480e803d | 2189 | spectre_v2_enabled = SPECTRE_V2_EIBRS_LFENCE; |
1e19da85 PZ |
2190 | break; |
2191 | ||
2192 | case SPECTRE_V2_CMD_EIBRS_RETPOLINE: | |
480e803d | 2193 | spectre_v2_enabled = SPECTRE_V2_EIBRS_RETPOLINE; |
da285121 DW |
2194 | break; |
2195 | } | |
480e803d | 2196 | } |
da285121 | 2197 | |
480e803d DK |
2198 | static void __init spectre_v2_update_mitigation(void) |
2199 | { | |
6a7c3c26 PG |
2200 | if (spectre_v2_cmd == SPECTRE_V2_CMD_AUTO && |
2201 | !spectre_v2_in_eibrs_mode(spectre_v2_enabled)) { | |
480e803d DK |
2202 | if (IS_ENABLED(CONFIG_MITIGATION_IBRS_ENTRY) && |
2203 | boot_cpu_has_bug(X86_BUG_RETBLEED) && | |
2204 | retbleed_mitigation != RETBLEED_MITIGATION_NONE && | |
2205 | retbleed_mitigation != RETBLEED_MITIGATION_STUFF && | |
2206 | boot_cpu_has(X86_FEATURE_IBRS) && | |
2207 | boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) { | |
2208 | spectre_v2_enabled = SPECTRE_V2_IBRS; | |
2209 | } | |
2210 | } | |
2211 | ||
2212 | if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) && !cpu_mitigations_off()) | |
2213 | pr_info("%s\n", spectre_v2_strings[spectre_v2_enabled]); | |
2214 | } | |
2215 | ||
2216 | static void __init spectre_v2_apply_mitigation(void) | |
2217 | { | |
2218 | if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) | |
44a3918c JP |
2219 | pr_err(SPECTRE_V2_EIBRS_EBPF_MSG); |
2220 | ||
480e803d | 2221 | if (spectre_v2_in_ibrs_mode(spectre_v2_enabled)) { |
e7862eda KP |
2222 | if (boot_cpu_has(X86_FEATURE_AUTOIBRS)) { |
2223 | msr_set_bit(MSR_EFER, _EFER_AUTOIBRS); | |
2224 | } else { | |
2225 | x86_spec_ctrl_base |= SPEC_CTRL_IBRS; | |
2226 | update_spec_ctrl(x86_spec_ctrl_base); | |
2227 | } | |
1e19da85 PZ |
2228 | } |
2229 | ||
480e803d | 2230 | switch (spectre_v2_enabled) { |
1e19da85 | 2231 | case SPECTRE_V2_NONE: |
480e803d DK |
2232 | return; |
2233 | ||
1e19da85 PZ |
2234 | case SPECTRE_V2_EIBRS: |
2235 | break; | |
2236 | ||
7c693f54 PG |
2237 | case SPECTRE_V2_IBRS: |
2238 | setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS); | |
eb23b5ef PG |
2239 | if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) |
2240 | pr_warn(SPECTRE_V2_IBRS_PERF_MSG); | |
7c693f54 PG |
2241 | break; |
2242 | ||
1e19da85 PZ |
2243 | case SPECTRE_V2_LFENCE: |
2244 | case SPECTRE_V2_EIBRS_LFENCE: | |
d45476d9 | 2245 | setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE); |
1e19da85 PZ |
2246 | fallthrough; |
2247 | ||
2248 | case SPECTRE_V2_RETPOLINE: | |
2249 | case SPECTRE_V2_EIBRS_RETPOLINE: | |
da285121 | 2250 | setup_force_cpu_cap(X86_FEATURE_RETPOLINE); |
1e19da85 | 2251 | break; |
da285121 DW |
2252 | } |
2253 | ||
4ad3278d PG |
2254 | /* |
2255 | * Disable alternate RSB predictions in kernel when indirect CALLs and | |
2256 | * JMPs gets protection against BHI and Intramode-BTI, but RET | |
2257 | * prediction from a non-RSB predictor is still a risk. | |
2258 | */ | |
480e803d DK |
2259 | if (spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE || |
2260 | spectre_v2_enabled == SPECTRE_V2_EIBRS_RETPOLINE || | |
2261 | spectre_v2_enabled == SPECTRE_V2_RETPOLINE) | |
4ad3278d PG |
2262 | spec_ctrl_disable_kernel_rrsba(); |
2263 | ||
480e803d | 2264 | spectre_v2_select_rsb_mitigation(spectre_v2_enabled); |
9756bba2 | 2265 | |
dd84441a | 2266 | /* |
7c693f54 PG |
2267 | * Retpoline protects the kernel, but doesn't protect firmware. IBRS |
2268 | * and Enhanced IBRS protect firmware too, so enable IBRS around | |
e7862eda KP |
2269 | * firmware calls only when IBRS / Enhanced / Automatic IBRS aren't |
2270 | * otherwise enabled. | |
706d5168 | 2271 | * |
480e803d DK |
2272 | * Use "spectre_v2_enabled" to check Enhanced IBRS instead of |
2273 | * boot_cpu_has(), because the user might select retpoline on the kernel | |
2274 | * command line and, if the CPU supports Enhanced IBRS, the kernel might |
2275 | * unintentionally not enable IBRS around firmware calls. |
dd84441a | 2276 | */ |
28a99e95 | 2277 | if (boot_cpu_has_bug(X86_BUG_RETBLEED) && |
571c30b1 | 2278 | boot_cpu_has(X86_FEATURE_IBPB) && |
28a99e95 PZ |
2279 | (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || |
2280 | boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) { | |
2281 | ||
e3b78a7a | 2282 | if (retbleed_mitigation != RETBLEED_MITIGATION_IBPB) { |
28a99e95 PZ |
2283 | setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW); |
2284 | pr_info("Enabling Speculation Barrier for firmware calls\n"); | |
2285 | } | |
2286 | ||
480e803d DK |
2287 | } else if (boot_cpu_has(X86_FEATURE_IBRS) && |
2288 | !spectre_v2_in_ibrs_mode(spectre_v2_enabled)) { | |
dd84441a DW |
2289 | setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW); |
2290 | pr_info("Enabling Restricted Speculation for firmware calls\n"); | |
2291 | } | |
da285121 DW |
2292 | } |
2293 | ||
6893a959 | 2294 | static void update_stibp_msr(void * __unused) |
15d6b7aa | 2295 | { |
56aa4d22 | 2296 | u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP); |
66065157 | 2297 | update_spec_ctrl(val); |
15d6b7aa TG |
2298 | } |
2299 | ||
6893a959 TG |
2300 | /* Update x86_spec_ctrl_base in case SMT state changed. */ |
2301 | static void update_stibp_strict(void) | |
15d6b7aa | 2302 | { |
6893a959 TG |
2303 | u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP; |
2304 | ||
2305 | if (sched_smt_active()) | |
2306 | mask |= SPEC_CTRL_STIBP; | |
2307 | ||
2308 | if (mask == x86_spec_ctrl_base) | |
2309 | return; | |
2310 | ||
2311 | pr_info("Update user space SMT mitigation: STIBP %s\n", | |
2312 | mask & SPEC_CTRL_STIBP ? "always-on" : "off"); | |
2313 | x86_spec_ctrl_base = mask; | |
2314 | on_each_cpu(update_stibp_msr, NULL, 1); | |
15d6b7aa TG |
2315 | } |
2316 | ||
7cc765a6 TG |
2317 | /* Update the static key controlling the evaluation of TIF_SPEC_IB */ |
2318 | static void update_indir_branch_cond(void) | |
2319 | { | |
2320 | if (sched_smt_active()) | |
2321 | static_branch_enable(&switch_to_cond_stibp); | |
2322 | else | |
2323 | static_branch_disable(&switch_to_cond_stibp); | |
2324 | } | |
2325 | ||
39226ef0 JP |
2326 | #undef pr_fmt |
2327 | #define pr_fmt(fmt) fmt | |
2328 | ||
bc124170 TG |
2329 | /* Update the static key controlling the MDS CPU buffer clear in idle */ |
2330 | static void update_mds_branch_idle(void) | |
2331 | { | |
2332 | /* | |
2333 | * Enable the idle clearing if SMT is active on CPUs which are | |
2334 | * affected only by MSBDS and not any other MDS variant. | |
2335 | * | |
2336 | * The other variants cannot be mitigated when SMT is enabled, so | |
2337 | * clearing the buffers on idle just to prevent the Store Buffer | |
2338 | * repartitioning leak would be a window dressing exercise. | |
2339 | */ | |
2340 | if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY)) | |
2341 | return; | |
2342 | ||
99a83db5 | 2343 | if (sched_smt_active()) { |
f9af88a3 | 2344 | static_branch_enable(&cpu_buf_idle_clear); |
99a83db5 | 2345 | } else if (mmio_mitigation == MMIO_MITIGATION_OFF || |
d0485730 | 2346 | (x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) { |
f9af88a3 | 2347 | static_branch_disable(&cpu_buf_idle_clear); |
99a83db5 | 2348 | } |
bc124170 TG |
2349 | } |
2350 | ||
39226ef0 | 2351 | #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n" |
1b42f017 | 2352 | #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n" |
1dc6ff02 | 2353 | #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n" |
39226ef0 | 2354 | |
9c92374b | 2355 | void cpu_bugs_smt_update(void) |
15d6b7aa | 2356 | { |
15d6b7aa TG |
2357 | mutex_lock(&spec_ctrl_mutex); |
2358 | ||
0de05d05 JP |
2359 | if (sched_smt_active() && unprivileged_ebpf_enabled() && |
2360 | spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) | |
2361 | pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG); | |
2362 | ||
21998a35 | 2363 | switch (spectre_v2_user_stibp) { |
6893a959 TG |
2364 | case SPECTRE_V2_USER_NONE: |
2365 | break; | |
2366 | case SPECTRE_V2_USER_STRICT: | |
20c3a2c3 | 2367 | case SPECTRE_V2_USER_STRICT_PREFERRED: |
6893a959 TG |
2368 | update_stibp_strict(); |
2369 | break; | |
9137bb27 | 2370 | case SPECTRE_V2_USER_PRCTL: |
6b3e64c2 | 2371 | case SPECTRE_V2_USER_SECCOMP: |
7cc765a6 | 2372 | update_indir_branch_cond(); |
9137bb27 | 2373 | break; |
15d6b7aa | 2374 | } |
6893a959 | 2375 | |
22dd8365 TG |
2376 | switch (mds_mitigation) { |
2377 | case MDS_MITIGATION_FULL: | |
b8ce25df | 2378 | case MDS_MITIGATION_AUTO: |
22dd8365 | 2379 | case MDS_MITIGATION_VMWERV: |
39226ef0 JP |
2380 | if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY)) |
2381 | pr_warn_once(MDS_MSG_SMT); | |
bc124170 | 2382 | update_mds_branch_idle(); |
22dd8365 TG |
2383 | break; |
2384 | case MDS_MITIGATION_OFF: | |
2385 | break; | |
2386 | } | |
bc124170 | 2387 | |
1b42f017 PG |
2388 | switch (taa_mitigation) { |
2389 | case TAA_MITIGATION_VERW: | |
b8ce25df | 2390 | case TAA_MITIGATION_AUTO: |
1b42f017 PG |
2391 | case TAA_MITIGATION_UCODE_NEEDED: |
2392 | if (sched_smt_active()) | |
2393 | pr_warn_once(TAA_MSG_SMT); | |
2394 | break; | |
2395 | case TAA_MITIGATION_TSX_DISABLED: | |
2396 | case TAA_MITIGATION_OFF: | |
2397 | break; | |
2398 | } | |
2399 | ||
1dc6ff02 JP |
2400 | switch (mmio_mitigation) { |
2401 | case MMIO_MITIGATION_VERW: | |
b8ce25df | 2402 | case MMIO_MITIGATION_AUTO: |
1dc6ff02 JP |
2403 | case MMIO_MITIGATION_UCODE_NEEDED: |
2404 | if (sched_smt_active()) | |
2405 | pr_warn_once(MMIO_MSG_SMT); | |
2406 | break; | |
2407 | case MMIO_MITIGATION_OFF: | |
2408 | break; | |
2409 | } | |
2410 | ||
d8010d4b BPA |
2411 | switch (tsa_mitigation) { |
2412 | case TSA_MITIGATION_USER_KERNEL: | |
2413 | case TSA_MITIGATION_VM: | |
2414 | case TSA_MITIGATION_AUTO: | |
2415 | case TSA_MITIGATION_FULL: | |
2416 | /* | |
2417 | * TSA-SQ can potentially lead to info leakage between | |
2418 | * SMT threads. | |
2419 | */ | |
2420 | if (sched_smt_active()) | |
2421 | static_branch_enable(&cpu_buf_idle_clear); | |
2422 | else | |
2423 | static_branch_disable(&cpu_buf_idle_clear); | |
2424 | break; | |
2425 | case TSA_MITIGATION_NONE: | |
2426 | case TSA_MITIGATION_UCODE_NEEDED: | |
2427 | break; | |
2428 | } | |
2429 | ||
15d6b7aa TG |
2430 | mutex_unlock(&spec_ctrl_mutex); |
2431 | } | |
2432 | ||
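Several of the paths above key off sched_smt_active(); the corresponding SMT state is visible from user space under /sys/devices/system/cpu/smt/. A small read-only sketch of that interface:

/*
 * Sketch: inspect the SMT state that cpu_bugs_smt_update() reacts
 * to. .../smt/active reads 0 or 1; .../smt/control reads e.g. "on",
 * "off", "forceoff" or "notsupported".
 */
#include <stdio.h>

static void show(const char *path)
{
	char buf[64];
	FILE *f = fopen(path, "r");

	if (f && fgets(buf, sizeof(buf), f))
		printf("%s: %s", path, buf);
	if (f)
		fclose(f);
}

int main(void)
{
	show("/sys/devices/system/cpu/smt/active");
	show("/sys/devices/system/cpu/smt/control");
	return 0;
}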
24f7fc83 KRW |
2433 | #undef pr_fmt |
2434 | #define pr_fmt(fmt) "Speculative Store Bypass: " fmt | |
2435 | ||
f9544b2b | 2436 | static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE; |
24f7fc83 KRW |
2437 | |
2438 | /* The kernel command line selection */ | |
2439 | enum ssb_mitigation_cmd { | |
2440 | SPEC_STORE_BYPASS_CMD_NONE, | |
2441 | SPEC_STORE_BYPASS_CMD_AUTO, | |
2442 | SPEC_STORE_BYPASS_CMD_ON, | |
a73ec77e | 2443 | SPEC_STORE_BYPASS_CMD_PRCTL, |
f21b53b2 | 2444 | SPEC_STORE_BYPASS_CMD_SECCOMP, |
24f7fc83 KRW |
2445 | }; |
2446 | ||
8770709f | 2447 | static const char * const ssb_strings[] = { |
24f7fc83 | 2448 | [SPEC_STORE_BYPASS_NONE] = "Vulnerable", |
a73ec77e | 2449 | [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled", |
f21b53b2 KC |
2450 | [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl", |
2451 | [SPEC_STORE_BYPASS_SECCOMP] = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp", | |
24f7fc83 KRW |
2452 | }; |
2453 | ||
2454 | static const struct { | |
2455 | const char *option; | |
2456 | enum ssb_mitigation_cmd cmd; | |
1de7edbb | 2457 | } ssb_mitigation_options[] __initconst = { |
f21b53b2 KC |
2458 | { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */ |
2459 | { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */ | |
2460 | { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */ | |
2461 | { "prctl", SPEC_STORE_BYPASS_CMD_PRCTL }, /* Disable Speculative Store Bypass via prctl */ | |
2462 | { "seccomp", SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */ | |
24f7fc83 KRW |
2463 | }; |
2464 | ||
2465 | static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void) | |
2466 | { | |
b908cdab | 2467 | enum ssb_mitigation_cmd cmd; |
24f7fc83 KRW |
2468 | char arg[20]; |
2469 | int ret, i; | |
2470 | ||
b908cdab BL |
2471 | cmd = IS_ENABLED(CONFIG_MITIGATION_SSB) ? |
2472 | SPEC_STORE_BYPASS_CMD_AUTO : SPEC_STORE_BYPASS_CMD_NONE; | |
d68be4c4 JP |
2473 | if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") || |
2474 | cpu_mitigations_off()) { | |
24f7fc83 KRW |
2475 | return SPEC_STORE_BYPASS_CMD_NONE; |
2476 | } else { | |
2477 | ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable", | |
2478 | arg, sizeof(arg)); | |
2479 | if (ret < 0) | |
b908cdab | 2480 | return cmd; |
24f7fc83 KRW |
2481 | |
2482 | for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) { | |
2483 | if (!match_option(arg, ret, ssb_mitigation_options[i].option)) | |
2484 | continue; | |
2485 | ||
2486 | cmd = ssb_mitigation_options[i].cmd; | |
2487 | break; | |
2488 | } | |
2489 | ||
2490 | if (i >= ARRAY_SIZE(ssb_mitigation_options)) { | |
b908cdab BL |
2491 | pr_err("unknown option (%s). Switching to default mode\n", arg); |
2492 | return cmd; | |
24f7fc83 KRW |
2493 | } |
2494 | } | |
2495 | ||
2496 | return cmd; | |
2497 | } | |
2498 | ||
5ece59a2 | 2499 | static void __init ssb_select_mitigation(void) |
24f7fc83 | 2500 | { |
24f7fc83 KRW |
2501 | enum ssb_mitigation_cmd cmd; |
2502 | ||
9f65fb29 | 2503 | if (!boot_cpu_has(X86_FEATURE_SSBD)) |
5ece59a2 | 2504 | goto out; |
24f7fc83 KRW |
2505 | |
2506 | cmd = ssb_parse_cmdline(); | |
2507 | if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) && | |
2508 | (cmd == SPEC_STORE_BYPASS_CMD_NONE || | |
2509 | cmd == SPEC_STORE_BYPASS_CMD_AUTO)) | |
5ece59a2 | 2510 | return; |
24f7fc83 KRW |
2511 | |
2512 | switch (cmd) { | |
f21b53b2 KC |
2513 | case SPEC_STORE_BYPASS_CMD_SECCOMP: |
2514 | /* | |
2515 | * Choose prctl+seccomp as the default mode if seccomp is | |
2516 | * enabled. | |
2517 | */ | |
2518 | if (IS_ENABLED(CONFIG_SECCOMP)) | |
5ece59a2 | 2519 | ssb_mode = SPEC_STORE_BYPASS_SECCOMP; |
f21b53b2 | 2520 | else |
5ece59a2 | 2521 | ssb_mode = SPEC_STORE_BYPASS_PRCTL; |
a73ec77e | 2522 | break; |
24f7fc83 | 2523 | case SPEC_STORE_BYPASS_CMD_ON: |
5ece59a2 | 2524 | ssb_mode = SPEC_STORE_BYPASS_DISABLE; |
24f7fc83 | 2525 | break; |
2f46993d | 2526 | case SPEC_STORE_BYPASS_CMD_AUTO: |
a73ec77e | 2527 | case SPEC_STORE_BYPASS_CMD_PRCTL: |
5ece59a2 | 2528 | ssb_mode = SPEC_STORE_BYPASS_PRCTL; |
a73ec77e | 2529 | break; |
24f7fc83 KRW |
2530 | case SPEC_STORE_BYPASS_CMD_NONE: |
2531 | break; | |
2532 | } | |
2533 | ||
5ece59a2 DK |
2534 | out: |
2535 | if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) | |
2536 | pr_info("%s\n", ssb_strings[ssb_mode]); | |
2537 | } | |
2538 | ||
2539 | static void __init ssb_apply_mitigation(void) | |
2540 | { | |
77243971 KRW |
2541 | /* |
2542 | * We have three CPU feature flags that are in play here: | |
2543 | * - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible. | |
9f65fb29 | 2544 | * - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass |
77243971 KRW |
2545 | * - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation |
2546 | */ | |
5ece59a2 | 2547 | if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) { |
24f7fc83 | 2548 | setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE); |
77243971 | 2549 | /* |
6ac2f49e KRW |
2550 | * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may |
2551 | * use a completely different MSR and bit dependent on family. | |
77243971 | 2552 | */ |
612bc3b3 TL |
2553 | if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) && |
2554 | !static_cpu_has(X86_FEATURE_AMD_SSBD)) { | |
108fab4b | 2555 | x86_amd_ssb_disable(); |
612bc3b3 | 2556 | } else { |
9f65fb29 | 2557 | x86_spec_ctrl_base |= SPEC_CTRL_SSBD; |
66065157 | 2558 | update_spec_ctrl(x86_spec_ctrl_base); |
77243971 KRW |
2559 | } |
2560 | } | |
24f7fc83 KRW |
2561 | } |
2562 | ||
da285121 | 2563 | #undef pr_fmt |
f21b53b2 | 2564 | #define pr_fmt(fmt) "Speculation prctl: " fmt |
da285121 | 2565 | |
6d991ba5 | 2566 | static void task_update_spec_tif(struct task_struct *tsk) |
a73ec77e | 2567 | { |
6d991ba5 TG |
2568 | /* Force the update of the real TIF bits */ |
2569 | set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE); | |
e6da8bb6 TG |
2570 | |
2571 | /* | |
2572 | * Immediately update the speculation control MSRs for the current | |
2573 | * task, but for a non-current task delay setting the CPU | |
2574 | * mitigation until it is scheduled next. | |
2575 | * | |
2576 | * This can only happen for SECCOMP mitigation. For PRCTL it's | |
2577 | * always the current task. | |
2578 | */ | |
6d991ba5 | 2579 | if (tsk == current) |
e6da8bb6 TG |
2580 | speculation_ctrl_update_current(); |
2581 | } | |
2582 | ||
e893bb1b BS |
2583 | static int l1d_flush_prctl_set(struct task_struct *task, unsigned long ctrl) |
2584 | { | |
2585 | ||
2586 | if (!static_branch_unlikely(&switch_mm_cond_l1d_flush)) | |
2587 | return -EPERM; | |
2588 | ||
2589 | switch (ctrl) { | |
2590 | case PR_SPEC_ENABLE: | |
2591 | set_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH); | |
2592 | return 0; | |
2593 | case PR_SPEC_DISABLE: | |
2594 | clear_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH); | |
2595 | return 0; | |
2596 | default: | |
2597 | return -ERANGE; | |
2598 | } | |
2599 | } | |
2600 | ||
e6da8bb6 TG |
2601 | static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) |
2602 | { | |
f21b53b2 KC |
2603 | if (ssb_mode != SPEC_STORE_BYPASS_PRCTL && |
2604 | ssb_mode != SPEC_STORE_BYPASS_SECCOMP) | |
a73ec77e TG |
2605 | return -ENXIO; |
2606 | ||
356e4bff TG |
2607 | switch (ctrl) { |
2608 | case PR_SPEC_ENABLE: | |
2609 | /* If speculation is force disabled, enable is not allowed */ | |
2610 | if (task_spec_ssb_force_disable(task)) | |
2611 | return -EPERM; | |
2612 | task_clear_spec_ssb_disable(task); | |
71368af9 | 2613 | task_clear_spec_ssb_noexec(task); |
6d991ba5 | 2614 | task_update_spec_tif(task); |
356e4bff TG |
2615 | break; |
2616 | case PR_SPEC_DISABLE: | |
2617 | task_set_spec_ssb_disable(task); | |
71368af9 | 2618 | task_clear_spec_ssb_noexec(task); |
6d991ba5 | 2619 | task_update_spec_tif(task); |
356e4bff TG |
2620 | break; |
2621 | case PR_SPEC_FORCE_DISABLE: | |
2622 | task_set_spec_ssb_disable(task); | |
2623 | task_set_spec_ssb_force_disable(task); | |
71368af9 WL |
2624 | task_clear_spec_ssb_noexec(task); |
2625 | task_update_spec_tif(task); | |
2626 | break; | |
2627 | case PR_SPEC_DISABLE_NOEXEC: | |
2628 | if (task_spec_ssb_force_disable(task)) | |
2629 | return -EPERM; | |
2630 | task_set_spec_ssb_disable(task); | |
2631 | task_set_spec_ssb_noexec(task); | |
6d991ba5 | 2632 | task_update_spec_tif(task); |
356e4bff TG |
2633 | break; |
2634 | default: | |
2635 | return -ERANGE; | |
2636 | } | |
a73ec77e TG |
2637 | return 0; |
2638 | } | |
2639 | ||
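The per-task interface above is driven from user space through prctl(). A minimal sketch, assuming <linux/prctl.h> provides the PR_SPEC_* constants (standard since the SSBD work); the set call fails with ENXIO unless ssb_mode is prctl or seccomp, exactly as ssb_prctl_set() shows:

/*
 * Sketch: per-task speculation control from user space. The
 * PR_SET/GET_SPECULATION_CTRL commands and PR_SPEC_* flags come
 * from <linux/prctl.h>; glibc's <sys/prctl.h> pulls them in on
 * recent systems.
 */
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	int st;

	/* Force-disable Speculative Store Bypass for this task. */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_FORCE_DISABLE, 0, 0))
		perror("PR_SET_SPECULATION_CTRL");	/* ENXIO unless prctl/seccomp mode */

	st = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
	if (st < 0)
		perror("PR_GET_SPECULATION_CTRL");
	else
		printf("SSB state: %#x (FORCE_DISABLE=%d)\n",
		       st, !!(st & PR_SPEC_FORCE_DISABLE));
	return 0;
}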
1978b3a5 AM |
2640 | static bool is_spec_ib_user_controlled(void) |
2641 | { | |
2642 | return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL || | |
2643 | spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || | |
2644 | spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL || | |
2645 | spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP; | |
2646 | } | |
2647 | ||
9137bb27 TG |
2648 | static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) |
2649 | { | |
2650 | switch (ctrl) { | |
2651 | case PR_SPEC_ENABLE: | |
21998a35 AS |
2652 | if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && |
2653 | spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) | |
9137bb27 | 2654 | return 0; |
1978b3a5 | 2655 | |
9137bb27 | 2656 | /* |
1978b3a5 AM |
2657 | * With strict mode for both IBPB and STIBP, the instruction |
2658 | * code paths avoid checking this task flag and instead |
2659 | * unconditionally run the instruction. However, STIBP and IBPB | |
2660 | * are independent and either can be set to conditionally | |
2661 | * enabled regardless of the mode of the other. | |
2662 | * | |
2663 | * If either is set to conditional, allow the task flag to be | |
2664 | * updated, unless it was force-disabled by a previous prctl | |
2665 | * call. Currently, this is possible on an AMD CPU which has the | |
2666 | * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the | |
2667 | * kernel is booted with 'spectre_v2_user=seccomp', then | |
2668 | * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and | |
2669 | * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED. | |
9137bb27 | 2670 | */ |
1978b3a5 | 2671 | if (!is_spec_ib_user_controlled() || |
4d8df8cb | 2672 | task_spec_ib_force_disable(task)) |
9137bb27 | 2673 | return -EPERM; |
1978b3a5 | 2674 | |
9137bb27 TG |
2675 | task_clear_spec_ib_disable(task); |
2676 | task_update_spec_tif(task); | |
2677 | break; | |
2678 | case PR_SPEC_DISABLE: | |
2679 | case PR_SPEC_FORCE_DISABLE: | |
2680 | /* | |
2681 | * Indirect branch speculation is always allowed when | |
2682 | * mitigation is force disabled. | |
2683 | */ | |
21998a35 AS |
2684 | if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && |
2685 | spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) | |
9137bb27 | 2686 | return -EPERM; |
1978b3a5 AM |
2687 | |
2688 | if (!is_spec_ib_user_controlled()) | |
9137bb27 | 2689 | return 0; |
1978b3a5 | 2690 | |
9137bb27 TG |
2691 | task_set_spec_ib_disable(task); |
2692 | if (ctrl == PR_SPEC_FORCE_DISABLE) | |
2693 | task_set_spec_ib_force_disable(task); | |
2694 | task_update_spec_tif(task); | |
bd9a8542 | 2695 | if (task == current) |
a664ec91 | 2696 | indirect_branch_prediction_barrier(); |
9137bb27 TG |
2697 | break; |
2698 | default: | |
2699 | return -ERANGE; | |
2700 | } | |
2701 | return 0; | |
2702 | } | |
2703 | ||
8bf37d8c TG |
2704 | int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, |
2705 | unsigned long ctrl) | |
2706 | { | |
2707 | switch (which) { | |
2708 | case PR_SPEC_STORE_BYPASS: | |
2709 | return ssb_prctl_set(task, ctrl); | |
9137bb27 TG |
2710 | case PR_SPEC_INDIRECT_BRANCH: |
2711 | return ib_prctl_set(task, ctrl); | |
e893bb1b BS |
2712 | case PR_SPEC_L1D_FLUSH: |
2713 | return l1d_flush_prctl_set(task, ctrl); | |
8bf37d8c TG |
2714 | default: |
2715 | return -ENODEV; | |
2716 | } | |
2717 | } | |
2718 | ||
2719 | #ifdef CONFIG_SECCOMP | |
2720 | void arch_seccomp_spec_mitigate(struct task_struct *task) | |
2721 | { | |
f21b53b2 KC |
2722 | if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP) |
2723 | ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE); | |
21998a35 AS |
2724 | if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP || |
2725 | spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP) | |
6b3e64c2 | 2726 | ib_prctl_set(task, PR_SPEC_FORCE_DISABLE); |
8bf37d8c TG |
2727 | } |
2728 | #endif | |
2729 | ||
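arch_seccomp_spec_mitigate() is not reached via prctl: the core seccomp code
calls it when a task installs a filter without SECCOMP_FILTER_FLAG_SPEC_ALLOW.
A sketch of the triggering side, assuming a prepared sock_fprog and that the
task has already set no_new_privs:

	#include <linux/filter.h>
	#include <linux/seccomp.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/*
	 * Without SECCOMP_FILTER_FLAG_SPEC_ALLOW in the flags argument, a
	 * successful install force-disables SSB/IB speculation for this task
	 * in the seccomp mitigation modes, as implemented above.
	 */
	static long install_filter(const struct sock_fprog *prog)
	{
		return syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER, 0, prog);
	}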
e893bb1b BS |
2730 | static int l1d_flush_prctl_get(struct task_struct *task) |
2731 | { | |
2732 | if (!static_branch_unlikely(&switch_mm_cond_l1d_flush)) | |
2733 | return PR_SPEC_FORCE_DISABLE; | |
2734 | ||
2735 | if (test_ti_thread_flag(&task->thread_info, TIF_SPEC_L1D_FLUSH)) | |
2736 | return PR_SPEC_PRCTL | PR_SPEC_ENABLE; | |
2737 | else | |
2738 | return PR_SPEC_PRCTL | PR_SPEC_DISABLE; | |
2739 | } | |
2740 | ||
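The matching set side, l1d_flush_prctl_set(), appears earlier in the file and
is gated on the same switch_mm_cond_l1d_flush static branch, i.e. on booting
with l1d_flush=on. A sketch of a task opting in:

	#include <sys/prctl.h>
	#include <linux/prctl.h>

	/* Request an L1D cache flush whenever this task is switched out. */
	static int enable_l1d_flush(void)
	{
		return prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_L1D_FLUSH,
			     PR_SPEC_ENABLE, 0, 0);
	}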
7bbf1373 | 2741 | static int ssb_prctl_get(struct task_struct *task) |
a73ec77e TG |
2742 | { |
2743 | switch (ssb_mode) { | |
0a0ce0da JP |
2744 | case SPEC_STORE_BYPASS_NONE: |
2745 | if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS)) | |
2746 | return PR_SPEC_ENABLE; | |
2747 | return PR_SPEC_NOT_AFFECTED; | |
a73ec77e TG |
2748 | case SPEC_STORE_BYPASS_DISABLE: |
2749 | return PR_SPEC_DISABLE; | |
f21b53b2 | 2750 | case SPEC_STORE_BYPASS_SECCOMP: |
a73ec77e | 2751 | case SPEC_STORE_BYPASS_PRCTL: |
356e4bff TG |
2752 | if (task_spec_ssb_force_disable(task)) |
2753 | return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; | |
71368af9 WL |
2754 | if (task_spec_ssb_noexec(task)) |
2755 | return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC; | |
356e4bff | 2756 | if (task_spec_ssb_disable(task)) |
a73ec77e TG |
2757 | return PR_SPEC_PRCTL | PR_SPEC_DISABLE; |
2758 | return PR_SPEC_PRCTL | PR_SPEC_ENABLE; | |
a73ec77e | 2759 | } |
0a0ce0da | 2760 | BUG(); |
a73ec77e TG |
2761 | } |
2762 | ||
9137bb27 TG |
2763 | static int ib_prctl_get(struct task_struct *task) |
2764 | { | |
2765 | if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) | |
2766 | return PR_SPEC_NOT_AFFECTED; | |
2767 | ||
21998a35 AS |
2768 | if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE && |
2769 | spectre_v2_user_stibp == SPECTRE_V2_USER_NONE) | |
9137bb27 | 2770 | return PR_SPEC_ENABLE; |
1978b3a5 | 2771 | else if (is_spec_ib_user_controlled()) { |
9137bb27 TG |
2772 | if (task_spec_ib_force_disable(task)) |
2773 | return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; | |
2774 | if (task_spec_ib_disable(task)) | |
2775 | return PR_SPEC_PRCTL | PR_SPEC_DISABLE; | |
2776 | return PR_SPEC_PRCTL | PR_SPEC_ENABLE; | |
1978b3a5 AM |
2777 | } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT || |
2778 | spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || | |
2779 | spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED) | |
2780 | return PR_SPEC_DISABLE; | |
2781 | else | |
9137bb27 | 2782 | return PR_SPEC_NOT_AFFECTED; |
9137bb27 TG |
2783 | } |
2784 | ||
7bbf1373 | 2785 | int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) |
a73ec77e TG |
2786 | { |
2787 | switch (which) { | |
2788 | case PR_SPEC_STORE_BYPASS: | |
7bbf1373 | 2789 | return ssb_prctl_get(task); |
9137bb27 TG |
2790 | case PR_SPEC_INDIRECT_BRANCH: |
2791 | return ib_prctl_get(task); | |
e893bb1b BS |
2792 | case PR_SPEC_L1D_FLUSH: |
2793 | return l1d_flush_prctl_get(task); | |
a73ec77e TG |
2794 | default: |
2795 | return -ENODEV; | |
2796 | } | |
2797 | } | |
2798 | ||
77243971 KRW |
2799 | void x86_spec_ctrl_setup_ap(void) |
2800 | { | |
7eb8956a | 2801 | if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) |
66065157 | 2802 | update_spec_ctrl(x86_spec_ctrl_base); |
764f3c21 KRW |
2803 | |
2804 | if (ssb_mode == SPEC_STORE_BYPASS_DISABLE) | |
9f65fb29 | 2805 | x86_amd_ssb_disable(); |
77243971 KRW |
2806 | } |
2807 | ||
b8e8c830 PB |
2808 | bool itlb_multihit_kvm_mitigation; |
2809 | EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation); | |
2810 | ||
56563f53 KRW |
2811 | #undef pr_fmt |
2812 | #define pr_fmt(fmt) "L1TF: " fmt | |
72c6d2db | 2813 | |
d90a7a0e | 2814 | /* Default mitigation for L1TF-affected CPUs */ |
3a4ee4ff | 2815 | enum l1tf_mitigations l1tf_mitigation __ro_after_init = |
d43ba2dc | 2816 | IS_ENABLED(CONFIG_MITIGATION_L1TF) ? L1TF_MITIGATION_AUTO : L1TF_MITIGATION_OFF; |
72c6d2db | 2817 | #if IS_ENABLED(CONFIG_KVM_INTEL) |
d90a7a0e | 2818 | EXPORT_SYMBOL_GPL(l1tf_mitigation); |
1eb46908 | 2819 | #endif |
895ae47f | 2820 | enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO; |
72c6d2db | 2821 | EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation); |
72c6d2db | 2822 | |
cc51e542 AK |
2823 | /* |
2824 | * These CPUs all support a 44-bit physical address space internally in the |
2825 | * cache, but CPUID can report a smaller number of physical address bits. |
2826 | * |
2827 | * The L1TF mitigation uses the topmost address bit for the inversion of |
2828 | * non-present PTEs. When the installed memory reaches into the topmost |
2829 | * address bit due to memory holes (observed on machines which report 36 |
2830 | * physical address bits and have 32G RAM installed), the mitigation range |
2831 | * check in l1tf_apply_mitigation() triggers. This is a false positive: |
2832 | * the mitigation is still possible because the cache internally uses 44 |
2833 | * bits. Use the cache bits instead of the reported physical bits, and |
2834 | * adjust them on the affected machines to 44 if the reported value is |
2835 | * less than 44. |
2836 | */ | |
2837 | static void override_cache_bits(struct cpuinfo_x86 *c) | |
2838 | { | |
2839 | if (c->x86 != 6) | |
2840 | return; | |
2841 | ||
8a28b022 TL |
2842 | switch (c->x86_vfm) { |
2843 | case INTEL_NEHALEM: | |
2844 | case INTEL_WESTMERE: | |
2845 | case INTEL_SANDYBRIDGE: | |
2846 | case INTEL_IVYBRIDGE: | |
2847 | case INTEL_HASWELL: | |
2848 | case INTEL_HASWELL_L: | |
2849 | case INTEL_HASWELL_G: | |
2850 | case INTEL_BROADWELL: | |
2851 | case INTEL_BROADWELL_G: | |
2852 | case INTEL_SKYLAKE_L: | |
2853 | case INTEL_SKYLAKE: | |
2854 | case INTEL_KABYLAKE_L: | |
2855 | case INTEL_KABYLAKE: | |
cc51e542 AK |
2856 | if (c->x86_cache_bits < 44) |
2857 | c->x86_cache_bits = 44; | |
2858 | break; | |
2859 | } | |
2860 | } | |
2861 | ||
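To make the comment above concrete: the limit checked in
l1tf_apply_mitigation() below works out to half_pa = 2^(x86_cache_bits - 1).
A CPU reporting 36 physical bits gives a 32GB cutoff, which 32GB of RAM plus
MMIO holes already exceeds; overriding to 44 bits raises the cutoff to 8TB
and the false positive disappears.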
56563f53 | 2862 | static void __init l1tf_select_mitigation(void) |
d43ba2dc DK |
2863 | { |
2864 | if (!boot_cpu_has_bug(X86_BUG_L1TF) || cpu_mitigations_off()) { | |
2865 | l1tf_mitigation = L1TF_MITIGATION_OFF; | |
2866 | return; | |
2867 | } | |
2868 | ||
2869 | if (l1tf_mitigation == L1TF_MITIGATION_AUTO) { | |
2870 | if (cpu_mitigations_auto_nosmt()) | |
2871 | l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT; | |
2872 | else | |
2873 | l1tf_mitigation = L1TF_MITIGATION_FLUSH; | |
2874 | } | |
2875 | } | |
2876 | ||
2877 | static void __init l1tf_apply_mitigation(void) | |
56563f53 KRW |
2878 | { |
2879 | u64 half_pa; | |
2880 | ||
2881 | if (!boot_cpu_has_bug(X86_BUG_L1TF)) | |
2882 | return; | |
2883 | ||
cc51e542 AK |
2884 | override_cache_bits(&boot_cpu_data); |
2885 | ||
d90a7a0e JK |
2886 | switch (l1tf_mitigation) { |
2887 | case L1TF_MITIGATION_OFF: | |
2888 | case L1TF_MITIGATION_FLUSH_NOWARN: | |
2889 | case L1TF_MITIGATION_FLUSH: | |
d43ba2dc | 2890 | case L1TF_MITIGATION_AUTO: |
d90a7a0e JK |
2891 | break; |
2892 | case L1TF_MITIGATION_FLUSH_NOSMT: | |
2893 | case L1TF_MITIGATION_FULL: | |
2894 | cpu_smt_disable(false); | |
2895 | break; | |
2896 | case L1TF_MITIGATION_FULL_FORCE: | |
2897 | cpu_smt_disable(true); | |
2898 | break; | |
2899 | } | |
2900 | ||
56563f53 KRW |
2901 | #if CONFIG_PGTABLE_LEVELS == 2 |
2902 | pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n"); | |
2903 | return; | |
2904 | #endif | |
2905 | ||
56563f53 | 2906 | half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT; |
5b5e4d62 MH |
2907 | if (l1tf_mitigation != L1TF_MITIGATION_OFF && |
2908 | e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) { | |
56563f53 | 2909 | pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n"); |
6a012288 VB |
2910 | pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n", |
2911 | half_pa); | |
2912 | pr_info("However, doing so will make a part of your RAM unusable.\n"); | |
65fd4cb6 | 2913 | pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n"); |
56563f53 KRW |
2914 | return; |
2915 | } | |
2916 | ||
2917 | setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV); | |
2918 | } | |
d90a7a0e JK |
2919 | |
2920 | static int __init l1tf_cmdline(char *str) | |
2921 | { | |
2922 | if (!boot_cpu_has_bug(X86_BUG_L1TF)) | |
2923 | return 0; | |
2924 | ||
2925 | if (!str) | |
2926 | return -EINVAL; | |
2927 | ||
2928 | if (!strcmp(str, "off")) | |
2929 | l1tf_mitigation = L1TF_MITIGATION_OFF; | |
2930 | else if (!strcmp(str, "flush,nowarn")) | |
2931 | l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN; | |
2932 | else if (!strcmp(str, "flush")) | |
2933 | l1tf_mitigation = L1TF_MITIGATION_FLUSH; | |
2934 | else if (!strcmp(str, "flush,nosmt")) | |
2935 | l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT; | |
2936 | else if (!strcmp(str, "full")) | |
2937 | l1tf_mitigation = L1TF_MITIGATION_FULL; | |
2938 | else if (!strcmp(str, "full,force")) | |
2939 | l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE; | |
2940 | ||
2941 | return 0; | |
2942 | } | |
2943 | early_param("l1tf", l1tf_cmdline); | |
2944 | ||
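For example, booting with l1tf=flush,nosmt disables SMT but allows it to be
re-enabled later via sysfs (cpu_smt_disable(false) above), whereas
l1tf=full,force disables SMT irrevocably (cpu_smt_disable(true)).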
fb3bd914 BPA |
2945 | #undef pr_fmt |
2946 | #define pr_fmt(fmt) "Speculative Return Stack Overflow: " fmt | |
2947 | ||
2948 | enum srso_mitigation { | |
2949 | SRSO_MITIGATION_NONE, | |
1f4bb068 | 2950 | SRSO_MITIGATION_AUTO, |
dc6306ad JP |
2951 | SRSO_MITIGATION_UCODE_NEEDED, |
2952 | SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED, | |
fb3bd914 BPA |
2953 | SRSO_MITIGATION_MICROCODE, |
2954 | SRSO_MITIGATION_SAFE_RET, | |
233d6f68 | 2955 | SRSO_MITIGATION_IBPB, |
d893832d | 2956 | SRSO_MITIGATION_IBPB_ON_VMEXIT, |
8442df2b | 2957 | SRSO_MITIGATION_BP_SPEC_REDUCE, |
fb3bd914 BPA |
2958 | }; |
2959 | ||
fb3bd914 | 2960 | static const char * const srso_strings[] = { |
dc6306ad JP |
2961 | [SRSO_MITIGATION_NONE] = "Vulnerable", |
2962 | [SRSO_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", | |
2963 | [SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED] = "Vulnerable: Safe RET, no microcode", | |
2964 | [SRSO_MITIGATION_MICROCODE] = "Vulnerable: Microcode, no safe RET", | |
2965 | [SRSO_MITIGATION_SAFE_RET] = "Mitigation: Safe RET", | |
2966 | [SRSO_MITIGATION_IBPB] = "Mitigation: IBPB", | |
8442df2b BP |
2967 | [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only", |
2968 | [SRSO_MITIGATION_BP_SPEC_REDUCE] = "Mitigation: Reduced Speculation" | |
fb3bd914 BPA |
2969 | }; |
2970 | ||
1f4bb068 | 2971 | static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_AUTO; |
fb3bd914 BPA |
2972 | |
2973 | static int __init srso_parse_cmdline(char *str) | |
2974 | { | |
2975 | if (!str) | |
2976 | return -EINVAL; | |
2977 | ||
2978 | if (!strcmp(str, "off")) | |
1f4bb068 | 2979 | srso_mitigation = SRSO_MITIGATION_NONE; |
fb3bd914 | 2980 | else if (!strcmp(str, "microcode")) |
1f4bb068 | 2981 | srso_mitigation = SRSO_MITIGATION_MICROCODE; |
fb3bd914 | 2982 | else if (!strcmp(str, "safe-ret")) |
1f4bb068 | 2983 | srso_mitigation = SRSO_MITIGATION_SAFE_RET; |
233d6f68 | 2984 | else if (!strcmp(str, "ibpb")) |
1f4bb068 | 2985 | srso_mitigation = SRSO_MITIGATION_IBPB; |
d893832d | 2986 | else if (!strcmp(str, "ibpb-vmexit")) |
1f4bb068 | 2987 | srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT; |
fb3bd914 BPA |
2988 | else |
2989 | pr_err("Ignoring unknown SRSO option (%s).\n", str); |
2990 | ||
2991 | return 0; | |
2992 | } | |
2993 | early_param("spec_rstack_overflow", srso_parse_cmdline); | |
2994 | ||
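For example, booting with spec_rstack_overflow=safe-ret pins srso_mitigation
before srso_select_mitigation() runs, overriding the SRSO_MITIGATION_AUTO
default in the same way the other recognized values do.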
2995 | #define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options." | |
2996 | ||
2997 | static void __init srso_select_mitigation(void) | |
2998 | { | |
1f4bb068 | 2999 | bool has_microcode; |
fb3bd914 | 3000 | |
1f4bb068 DK |
3001 | if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off()) |
3002 | srso_mitigation = SRSO_MITIGATION_NONE; | |
fb3bd914 | 3003 | |
1f4bb068 DK |
3004 | if (srso_mitigation == SRSO_MITIGATION_NONE) |
3005 | return; | |
3006 | ||
3007 | if (srso_mitigation == SRSO_MITIGATION_AUTO) | |
3008 | srso_mitigation = SRSO_MITIGATION_SAFE_RET; | |
3009 | ||
3010 | has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE); | |
dc6306ad | 3011 | if (has_microcode) { |
1b5277c0 BPA |
3012 | /* |
3013 | * Zen1/2 with SMT off aren't vulnerable after the right | |
3014 | * IBPB microcode has been applied. | |
3015 | */ | |
6405b72e | 3016 | if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) { |
1b5277c0 | 3017 | setup_force_cpu_cap(X86_FEATURE_SRSO_NO); |
1f4bb068 DK |
3018 | srso_mitigation = SRSO_MITIGATION_NONE; |
3019 | return; | |
233d6f68 | 3020 | } |
dc6306ad JP |
3021 | } else { |
3022 | pr_warn("IBPB-extending microcode not applied!\n"); | |
3023 | pr_warn(SRSO_NOTICE); | |
233d6f68 BPA |
3024 | } |
3025 | ||
1f4bb068 DK |
3026 | switch (srso_mitigation) { |
3027 | case SRSO_MITIGATION_SAFE_RET: | |
3028 | if (boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO)) { | |
3029 | srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT; | |
87781880 | 3030 | goto ibpb_on_vmexit; |
1f4bb068 | 3031 | } |
87781880 | 3032 | |
1f4bb068 | 3033 | if (!IS_ENABLED(CONFIG_MITIGATION_SRSO)) { |
a033eec9 | 3034 | pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n"); |
1f4bb068 | 3035 | srso_mitigation = SRSO_MITIGATION_NONE; |
fb3bd914 | 3036 | } |
fb3bd914 | 3037 | |
1f4bb068 DK |
3038 | if (!has_microcode) |
3039 | srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED; | |
d893832d | 3040 | break; |
87781880 | 3041 | ibpb_on_vmexit: |
1f4bb068 | 3042 | case SRSO_MITIGATION_IBPB_ON_VMEXIT: |
8442df2b BP |
3043 | if (boot_cpu_has(X86_FEATURE_SRSO_BP_SPEC_REDUCE)) { |
3044 | pr_notice("Reducing speculation to address VM/HV SRSO attack vector.\n"); | |
3045 | srso_mitigation = SRSO_MITIGATION_BP_SPEC_REDUCE; | |
3046 | break; | |
3047 | } | |
1f4bb068 DK |
3048 | fallthrough; |
3049 | case SRSO_MITIGATION_IBPB: | |
3050 | if (!IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) { | |
318e8c33 | 3051 | pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n"); |
1f4bb068 | 3052 | srso_mitigation = SRSO_MITIGATION_NONE; |
318e8c33 | 3053 | } |
1f4bb068 DK |
3054 | |
3055 | if (!has_microcode) | |
3056 | srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED; | |
d893832d | 3057 | break; |
1dbb6b14 DK |
3058 | default: |
3059 | break; | |
fb3bd914 | 3060 | } |
1f4bb068 | 3061 | } |
fb3bd914 | 3062 | |
1f4bb068 DK |
3063 | static void __init srso_update_mitigation(void) |
3064 | { | |
3065 | /* If retbleed is using IBPB, that works for SRSO as well */ | |
3066 | if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB && | |
3067 | boot_cpu_has(X86_FEATURE_IBPB_BRTYPE)) | |
3068 | srso_mitigation = SRSO_MITIGATION_IBPB; | |
3069 | ||
891d3b8b BPA |
3070 | if (boot_cpu_has_bug(X86_BUG_SRSO) && |
3071 | !cpu_mitigations_off() && | |
3072 | !boot_cpu_has(X86_FEATURE_SRSO_NO)) | |
1f4bb068 DK |
3073 | pr_info("%s\n", srso_strings[srso_mitigation]); |
3074 | } | |
3075 | ||
3076 | static void __init srso_apply_mitigation(void) | |
3077 | { | |
8442df2b BP |
3078 | /* |
3079 | * Clear the feature flag if this mitigation is not selected as that | |
3080 | * feature flag controls the BpSpecReduce MSR bit toggling in KVM. | |
3081 | */ | |
3082 | if (srso_mitigation != SRSO_MITIGATION_BP_SPEC_REDUCE) | |
3083 | setup_clear_cpu_cap(X86_FEATURE_SRSO_BP_SPEC_REDUCE); | |
3084 | ||
1f4bb068 DK |
3085 | if (srso_mitigation == SRSO_MITIGATION_NONE) { |
3086 | if (boot_cpu_has(X86_FEATURE_SBPB)) | |
3087 | x86_pred_cmd = PRED_CMD_SBPB; | |
3088 | return; | |
3089 | } | |
3090 | ||
3091 | switch (srso_mitigation) { | |
3092 | case SRSO_MITIGATION_SAFE_RET: | |
3093 | case SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED: | |
3094 | /* | |
3095 | * Enable the return thunk for generated code | |
3096 | * like ftrace, static_call, etc. | |
3097 | */ | |
3098 | setup_force_cpu_cap(X86_FEATURE_RETHUNK); | |
3099 | setup_force_cpu_cap(X86_FEATURE_UNRET); | |
3100 | ||
3101 | if (boot_cpu_data.x86 == 0x19) { | |
3102 | setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS); | |
a0f3fe54 | 3103 | set_return_thunk(srso_alias_return_thunk); |
1f4bb068 DK |
3104 | } else { |
3105 | setup_force_cpu_cap(X86_FEATURE_SRSO); | |
a0f3fe54 | 3106 | set_return_thunk(srso_return_thunk); |
1f4bb068 DK |
3107 | } |
3108 | break; | |
3109 | case SRSO_MITIGATION_IBPB: | |
3110 | setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB); | |
3111 | /* | |
3112 | * IBPB on entry already obviates the need for | |
3113 | * software-based untraining so clear those in case some | |
3114 | * other mitigation like Retbleed has selected them. | |
3115 | */ | |
3116 | setup_clear_cpu_cap(X86_FEATURE_UNRET); | |
3117 | setup_clear_cpu_cap(X86_FEATURE_RETHUNK); | |
3118 | fallthrough; | |
3119 | case SRSO_MITIGATION_IBPB_ON_VMEXIT: | |
3120 | setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT); | |
3121 | /* | |
3122 | * There is no need for RSB filling: entry_ibpb() ensures | |
3123 | * all predictions, including the RSB, are invalidated, | |
3124 | * regardless of IBPB implementation. | |
3125 | */ | |
3126 | setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT); | |
3127 | break; | |
3128 | default: | |
3129 | break; | |
3130 | } | |
fb3bd914 BPA |
3131 | } |
3132 | ||
56563f53 | 3133 | #undef pr_fmt |
39226ef0 | 3134 | #define pr_fmt(fmt) fmt |
56563f53 | 3135 | |
61dc0f55 | 3136 | #ifdef CONFIG_SYSFS |
d1059518 | 3137 | |
72c6d2db TG |
3138 | #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion" |
3139 | ||
3140 | #if IS_ENABLED(CONFIG_KVM_INTEL) | |
8770709f | 3141 | static const char * const l1tf_vmx_states[] = { |
a7b9020b TG |
3142 | [VMENTER_L1D_FLUSH_AUTO] = "auto", |
3143 | [VMENTER_L1D_FLUSH_NEVER] = "vulnerable", | |
3144 | [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes", | |
3145 | [VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes", | |
3146 | [VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled", | |
8e0b2b91 | 3147 | [VMENTER_L1D_FLUSH_NOT_REQUIRED] = "flush not necessary" |
72c6d2db TG |
3148 | }; |
3149 | ||
3150 | static ssize_t l1tf_show_state(char *buf) | |
3151 | { | |
3152 | if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) | |
1d30800c | 3153 | return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG); |
72c6d2db | 3154 | |
ea156d19 PB |
3155 | if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED || |
3156 | (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER && | |
130d6f94 | 3157 | sched_smt_active())) { |
1d30800c BP |
3158 | return sysfs_emit(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG, |
3159 | l1tf_vmx_states[l1tf_vmx_mitigation]); | |
130d6f94 | 3160 | } |
ea156d19 | 3161 | |
1d30800c BP |
3162 | return sysfs_emit(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG, |
3163 | l1tf_vmx_states[l1tf_vmx_mitigation], | |
3164 | sched_smt_active() ? "vulnerable" : "disabled"); | |
72c6d2db | 3165 | } |
b8e8c830 PB |
3166 | |
3167 | static ssize_t itlb_multihit_show_state(char *buf) | |
3168 | { | |
f29dfa53 PG |
3169 | if (!boot_cpu_has(X86_FEATURE_MSR_IA32_FEAT_CTL) || |
3170 | !boot_cpu_has(X86_FEATURE_VMX)) | |
1d30800c | 3171 | return sysfs_emit(buf, "KVM: Mitigation: VMX unsupported\n"); |
f29dfa53 | 3172 | else if (!(cr4_read_shadow() & X86_CR4_VMXE)) |
1d30800c | 3173 | return sysfs_emit(buf, "KVM: Mitigation: VMX disabled\n"); |
f29dfa53 | 3174 | else if (itlb_multihit_kvm_mitigation) |
1d30800c | 3175 | return sysfs_emit(buf, "KVM: Mitigation: Split huge pages\n"); |
b8e8c830 | 3176 | else |
1d30800c | 3177 | return sysfs_emit(buf, "KVM: Vulnerable\n"); |
b8e8c830 | 3178 | } |
72c6d2db TG |
3179 | #else |
3180 | static ssize_t l1tf_show_state(char *buf) | |
3181 | { | |
1d30800c | 3182 | return sysfs_emit(buf, "%s\n", L1TF_DEFAULT_MSG); |
72c6d2db | 3183 | } |
72c6d2db | 3184 | |
db4d30fb VT |
3185 | static ssize_t itlb_multihit_show_state(char *buf) |
3186 | { | |
1d30800c | 3187 | return sysfs_emit(buf, "Processor vulnerable\n"); |
db4d30fb | 3188 | } |
b8e8c830 | 3189 | #endif |
db4d30fb | 3190 | |
8a4b06d3 TG |
3191 | static ssize_t mds_show_state(char *buf) |
3192 | { | |
517c3ba0 | 3193 | if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { |
1d30800c BP |
3194 | return sysfs_emit(buf, "%s; SMT Host state unknown\n", |
3195 | mds_strings[mds_mitigation]); | |
8a4b06d3 TG |
3196 | } |
3197 | ||
3198 | if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) { | |
1d30800c BP |
3199 | return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], |
3200 | (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" : | |
3201 | sched_smt_active() ? "mitigated" : "disabled")); | |
8a4b06d3 TG |
3202 | } |
3203 | ||
1d30800c BP |
3204 | return sysfs_emit(buf, "%s; SMT %s\n", mds_strings[mds_mitigation], |
3205 | sched_smt_active() ? "vulnerable" : "disabled"); | |
8a4b06d3 TG |
3206 | } |
3207 | ||
6608b45a PG |
3208 | static ssize_t tsx_async_abort_show_state(char *buf) |
3209 | { | |
3210 | if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) || | |
3211 | (taa_mitigation == TAA_MITIGATION_OFF)) | |
1d30800c | 3212 | return sysfs_emit(buf, "%s\n", taa_strings[taa_mitigation]); |
6608b45a PG |
3213 | |
3214 | if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { | |
1d30800c BP |
3215 | return sysfs_emit(buf, "%s; SMT Host state unknown\n", |
3216 | taa_strings[taa_mitigation]); | |
6608b45a PG |
3217 | } |
3218 | ||
1d30800c BP |
3219 | return sysfs_emit(buf, "%s; SMT %s\n", taa_strings[taa_mitigation], |
3220 | sched_smt_active() ? "vulnerable" : "disabled"); | |
6608b45a PG |
3221 | } |
3222 | ||
8d50cdf8 PG |
3223 | static ssize_t mmio_stale_data_show_state(char *buf) |
3224 | { | |
3225 | if (mmio_mitigation == MMIO_MITIGATION_OFF) | |
3226 | return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]); | |
3227 | ||
3228 | if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { | |
3229 | return sysfs_emit(buf, "%s; SMT Host state unknown\n", | |
3230 | mmio_strings[mmio_mitigation]); | |
3231 | } | |
3232 | ||
3233 | return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation], | |
3234 | sched_smt_active() ? "vulnerable" : "disabled"); | |
3235 | } | |
3236 | ||
8076fcde PG |
3237 | static ssize_t rfds_show_state(char *buf) |
3238 | { | |
3239 | return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]); | |
3240 | } | |
3241 | ||
4e2c7197 DH |
3242 | static ssize_t old_microcode_show_state(char *buf) |
3243 | { | |
3244 | if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) | |
3245 | return sysfs_emit(buf, "Unknown: running under hypervisor\n"); |
3246 | ||
3247 | return sysfs_emit(buf, "Vulnerable\n"); | |
3248 | } | |
3249 | ||
f4818881 PG |
3250 | static ssize_t its_show_state(char *buf) |
3251 | { | |
3252 | return sysfs_emit(buf, "%s\n", its_strings[its_mitigation]); | |
3253 | } | |
3254 | ||
a8f76ae4 TC |
3255 | static char *stibp_state(void) |
3256 | { | |
fd470a8b KP |
3257 | if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) && |
3258 | !boot_cpu_has(X86_FEATURE_AUTOIBRS)) | |
34bce7c9 TC |
3259 | return ""; |
3260 | ||
21998a35 | 3261 | switch (spectre_v2_user_stibp) { |
fa1202ef | 3262 | case SPECTRE_V2_USER_NONE: |
0cd01ac5 | 3263 | return "; STIBP: disabled"; |
fa1202ef | 3264 | case SPECTRE_V2_USER_STRICT: |
0cd01ac5 | 3265 | return "; STIBP: forced"; |
20c3a2c3 | 3266 | case SPECTRE_V2_USER_STRICT_PREFERRED: |
0cd01ac5 | 3267 | return "; STIBP: always-on"; |
9137bb27 | 3268 | case SPECTRE_V2_USER_PRCTL: |
6b3e64c2 | 3269 | case SPECTRE_V2_USER_SECCOMP: |
7cc765a6 | 3270 | if (static_key_enabled(&switch_to_cond_stibp)) |
0cd01ac5 | 3271 | return "; STIBP: conditional"; |
fa1202ef TG |
3272 | } |
3273 | return ""; | |
a8f76ae4 TC |
3274 | } |
3275 | ||
3276 | static char *ibpb_state(void) | |
3277 | { | |
4c71a2b6 | 3278 | if (boot_cpu_has(X86_FEATURE_IBPB)) { |
7cc765a6 | 3279 | if (static_key_enabled(&switch_mm_always_ibpb)) |
0cd01ac5 | 3280 | return "; IBPB: always-on"; |
7cc765a6 | 3281 | if (static_key_enabled(&switch_mm_cond_ibpb)) |
0cd01ac5 JP |
3282 | return "; IBPB: conditional"; |
3283 | return "; IBPB: disabled"; | |
4c71a2b6 TG |
3284 | } |
3285 | return ""; | |
a8f76ae4 TC |
3286 | } |
3287 | ||
2b129932 DS |
3288 | static char *pbrsb_eibrs_state(void) |
3289 | { | |
3290 | if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) { | |
3291 | if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) || | |
3292 | boot_cpu_has(X86_FEATURE_RSB_VMEXIT)) | |
0cd01ac5 | 3293 | return "; PBRSB-eIBRS: SW sequence"; |
2b129932 | 3294 | else |
0cd01ac5 | 3295 | return "; PBRSB-eIBRS: Vulnerable"; |
2b129932 | 3296 | } else { |
0cd01ac5 | 3297 | return "; PBRSB-eIBRS: Not affected"; |
2b129932 DS |
3298 | } |
3299 | } | |
3300 | ||
04f4230e | 3301 | static const char *spectre_bhi_state(void) |
ec9404e4 PG |
3302 | { |
3303 | if (!boot_cpu_has_bug(X86_BUG_BHI)) | |
3304 | return "; BHI: Not affected"; | |
69129794 | 3305 | else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_HW)) |
ec9404e4 | 3306 | return "; BHI: BHI_DIS_S"; |
69129794 | 3307 | else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_LOOP)) |
95a6ccbd | 3308 | return "; BHI: SW loop, KVM: SW loop"; |
69129794 JP |
3309 | else if (boot_cpu_has(X86_FEATURE_RETPOLINE) && |
3310 | !boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE) && | |
3311 | rrsba_disabled) | |
ec9404e4 | 3312 | return "; BHI: Retpoline"; |
13327fad | 3313 | else if (boot_cpu_has(X86_FEATURE_CLEAR_BHB_VMEXIT)) |
5f882f3b | 3314 | return "; BHI: Vulnerable, KVM: SW loop"; |
ec9404e4 | 3315 | |
5f882f3b | 3316 | return "; BHI: Vulnerable"; |
ec9404e4 PG |
3317 | } |
3318 | ||
44a3918c JP |
3319 | static ssize_t spectre_v2_show_state(char *buf) |
3320 | { | |
eafd987d | 3321 | if (spectre_v2_enabled == SPECTRE_V2_LFENCE) |
1d30800c | 3322 | return sysfs_emit(buf, "Vulnerable: LFENCE\n"); |
eafd987d | 3323 | |
44a3918c | 3324 | if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled()) |
1d30800c | 3325 | return sysfs_emit(buf, "Vulnerable: eIBRS with unprivileged eBPF\n"); |
0de05d05 JP |
3326 | |
3327 | if (sched_smt_active() && unprivileged_ebpf_enabled() && | |
3328 | spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE) | |
1d30800c | 3329 | return sysfs_emit(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n"); |
44a3918c | 3330 | |
ec9404e4 | 3331 | return sysfs_emit(buf, "%s%s%s%s%s%s%s%s\n", |
1d30800c BP |
3332 | spectre_v2_strings[spectre_v2_enabled], |
3333 | ibpb_state(), | |
0cd01ac5 | 3334 | boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? "; IBRS_FW" : "", |
1d30800c | 3335 | stibp_state(), |
0cd01ac5 | 3336 | boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? "; RSB filling" : "", |
1d30800c | 3337 | pbrsb_eibrs_state(), |
ec9404e4 PG |
3338 | spectre_bhi_state(), |
3339 | /* this should always be at the end */ | |
1d30800c | 3340 | spectre_v2_module_string()); |
44a3918c JP |
3341 | } |
3342 | ||
7e5b3c26 MG |
3343 | static ssize_t srbds_show_state(char *buf) |
3344 | { | |
1d30800c | 3345 | return sysfs_emit(buf, "%s\n", srbds_strings[srbds_mitigation]); |
7e5b3c26 MG |
3346 | } |
3347 | ||
6b80b59b AC |
3348 | static ssize_t retbleed_show_state(char *buf) |
3349 | { | |
e6cfcdda KP |
3350 | if (retbleed_mitigation == RETBLEED_MITIGATION_UNRET || |
3351 | retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { | |
1d30800c BP |
3352 | if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && |
3353 | boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) | |
3354 | return sysfs_emit(buf, "Vulnerable: untrained return thunk / IBPB on non-AMD based uarch\n"); | |
e8ec1b6e | 3355 | |
1d30800c BP |
3356 | return sysfs_emit(buf, "%s; SMT %s\n", retbleed_strings[retbleed_mitigation], |
3357 | !sched_smt_active() ? "disabled" : | |
3358 | spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT || | |
3359 | spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ? | |
3360 | "enabled with STIBP protection" : "vulnerable"); | |
e8ec1b6e | 3361 | } |
7fbf47c7 | 3362 | |
1d30800c | 3363 | return sysfs_emit(buf, "%s\n", retbleed_strings[retbleed_mitigation]); |
6b80b59b AC |
3364 | } |
3365 | ||
fb3bd914 BPA |
3366 | static ssize_t srso_show_state(char *buf) |
3367 | { | |
e9fbc47b | 3368 | if (boot_cpu_has(X86_FEATURE_SRSO_NO)) |
6405b72e | 3369 | return sysfs_emit(buf, "Mitigation: SMT disabled\n"); |
e9fbc47b | 3370 | |
dc6306ad | 3371 | return sysfs_emit(buf, "%s\n", srso_strings[srso_mitigation]); |
fb3bd914 BPA |
3372 | } |
3373 | ||
8974eb58 DS |
3374 | static ssize_t gds_show_state(char *buf) |
3375 | { | |
3376 | return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]); | |
3377 | } | |
3378 | ||
d8010d4b BPA |
3379 | static ssize_t tsa_show_state(char *buf) |
3380 | { | |
3381 | return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]); | |
3382 | } | |
3383 | ||
7bb4d366 | 3384 | static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, |
ffed645e | 3385 | char *buf, unsigned int bug) |
61dc0f55 | 3386 | { |
d1059518 | 3387 | if (!boot_cpu_has_bug(bug)) |
1d30800c | 3388 | return sysfs_emit(buf, "Not affected\n"); |
d1059518 KRW |
3389 | |
3390 | switch (bug) { | |
3391 | case X86_BUG_CPU_MELTDOWN: | |
3392 | if (boot_cpu_has(X86_FEATURE_PTI)) | |
1d30800c | 3393 | return sysfs_emit(buf, "Mitigation: PTI\n"); |
d1059518 | 3394 | |
6cb2b08f | 3395 | if (hypervisor_is_type(X86_HYPER_XEN_PV)) |
1d30800c | 3396 | return sysfs_emit(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n"); |
6cb2b08f | 3397 | |
d1059518 KRW |
3398 | break; |
3399 | ||
3400 | case X86_BUG_SPECTRE_V1: | |
1d30800c | 3401 | return sysfs_emit(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]); |
d1059518 KRW |
3402 | |
3403 | case X86_BUG_SPECTRE_V2: | |
44a3918c | 3404 | return spectre_v2_show_state(buf); |
d1059518 | 3405 | |
24f7fc83 | 3406 | case X86_BUG_SPEC_STORE_BYPASS: |
1d30800c | 3407 | return sysfs_emit(buf, "%s\n", ssb_strings[ssb_mode]); |
24f7fc83 | 3408 | |
17dbca11 AK |
3409 | case X86_BUG_L1TF: |
3410 | if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV)) | |
72c6d2db | 3411 | return l1tf_show_state(buf); |
17dbca11 | 3412 | break; |
8a4b06d3 TG |
3413 | |
3414 | case X86_BUG_MDS: | |
3415 | return mds_show_state(buf); | |
3416 | ||
6608b45a PG |
3417 | case X86_BUG_TAA: |
3418 | return tsx_async_abort_show_state(buf); | |
3419 | ||
db4d30fb VT |
3420 | case X86_BUG_ITLB_MULTIHIT: |
3421 | return itlb_multihit_show_state(buf); | |
3422 | ||
7e5b3c26 MG |
3423 | case X86_BUG_SRBDS: |
3424 | return srbds_show_state(buf); | |
3425 | ||
8d50cdf8 PG |
3426 | case X86_BUG_MMIO_STALE_DATA: |
3427 | return mmio_stale_data_show_state(buf); | |
3428 | ||
6b80b59b AC |
3429 | case X86_BUG_RETBLEED: |
3430 | return retbleed_show_state(buf); | |
3431 | ||
fb3bd914 BPA |
3432 | case X86_BUG_SRSO: |
3433 | return srso_show_state(buf); | |
3434 | ||
8974eb58 DS |
3435 | case X86_BUG_GDS: |
3436 | return gds_show_state(buf); | |
3437 | ||
8076fcde PG |
3438 | case X86_BUG_RFDS: |
3439 | return rfds_show_state(buf); | |
3440 | ||
4e2c7197 DH |
3441 | case X86_BUG_OLD_MICROCODE: |
3442 | return old_microcode_show_state(buf); | |
3443 | ||
f4818881 PG |
3444 | case X86_BUG_ITS: |
3445 | return its_show_state(buf); | |
3446 | ||
d8010d4b BPA |
3447 | case X86_BUG_TSA: |
3448 | return tsa_show_state(buf); | |
3449 | ||
d1059518 KRW |
3450 | default: |
3451 | break; | |
3452 | } | |
3453 | ||
1d30800c | 3454 | return sysfs_emit(buf, "Vulnerable\n"); |
61dc0f55 TG |
3455 | } |
3456 | ||
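cpu_show_common() backs the files under
/sys/devices/system/cpu/vulnerabilities/, one per cpu_show_*() wrapper below.
A minimal reader sketch (the attribute name is just an example):

	#include <stdio.h>

	int main(void)
	{
		char line[256];
		FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");

		/* cpu_show_common() emits exactly one line per attribute. */
		if (f && fgets(line, sizeof(line), f))
			fputs(line, stdout);
		if (f)
			fclose(f);
		return 0;
	}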
d1059518 KRW |
3457 | ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) |
3458 | { | |
3459 | return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN); | |
3460 | } | |
3461 | ||
21e433bd | 3462 | ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) |
61dc0f55 | 3463 | { |
d1059518 | 3464 | return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1); |
61dc0f55 TG |
3465 | } |
3466 | ||
21e433bd | 3467 | ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) |
61dc0f55 | 3468 | { |
d1059518 | 3469 | return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2); |
61dc0f55 | 3470 | } |
c456442c KRW |
3471 | |
3472 | ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf) | |
3473 | { | |
3474 | return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS); | |
3475 | } | |
17dbca11 AK |
3476 | |
3477 | ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf) | |
3478 | { | |
3479 | return cpu_show_common(dev, attr, buf, X86_BUG_L1TF); | |
3480 | } | |
8a4b06d3 TG |
3481 | |
3482 | ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf) | |
3483 | { | |
3484 | return cpu_show_common(dev, attr, buf, X86_BUG_MDS); | |
3485 | } | |
6608b45a PG |
3486 | |
3487 | ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf) | |
3488 | { | |
3489 | return cpu_show_common(dev, attr, buf, X86_BUG_TAA); | |
3490 | } | |
db4d30fb VT |
3491 | |
3492 | ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf) | |
3493 | { | |
3494 | return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT); | |
3495 | } | |
7e5b3c26 MG |
3496 | |
3497 | ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf) | |
3498 | { | |
3499 | return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS); | |
3500 | } | |
8d50cdf8 PG |
3501 | |
3502 | ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf) | |
3503 | { | |
dd86a1d0 | 3504 | return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA); |
8d50cdf8 | 3505 | } |
6b80b59b AC |
3506 | |
3507 | ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf) | |
3508 | { | |
3509 | return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED); | |
3510 | } | |
fb3bd914 BPA |
3511 | |
3512 | ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf) | |
3513 | { | |
3514 | return cpu_show_common(dev, attr, buf, X86_BUG_SRSO); | |
3515 | } | |
64094e7e | 3516 | |
8974eb58 DS |
3517 | ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf) |
3518 | { | |
3519 | return cpu_show_common(dev, attr, buf, X86_BUG_GDS); | |
3520 | } | |
8076fcde PG |
3521 | |
3522 | ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attribute *attr, char *buf) | |
3523 | { | |
3524 | return cpu_show_common(dev, attr, buf, X86_BUG_RFDS); | |
3525 | } | |
4e2c7197 DH |
3526 | |
3527 | ssize_t cpu_show_old_microcode(struct device *dev, struct device_attribute *attr, char *buf) | |
3528 | { | |
3529 | return cpu_show_common(dev, attr, buf, X86_BUG_OLD_MICROCODE); | |
3530 | } | |
c4070e19 | 3531 | |
f4818881 PG |
3532 | ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_attribute *attr, char *buf) |
3533 | { | |
3534 | return cpu_show_common(dev, attr, buf, X86_BUG_ITS); | |
3535 | } | |
d8010d4b BPA |
3536 | |
3537 | ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf) | |
3538 | { | |
3539 | return cpu_show_common(dev, attr, buf, X86_BUG_TSA); | |
3540 | } | |
61dc0f55 | 3541 | #endif |
4461438a JP |
3542 | |
3543 | void __warn_thunk(void) | |
3544 | { | |
3545 | WARN_ONCE(1, "Unpatched return thunk in use. This should not happen!\n"); | |
3546 | } |