arch/x86/kernel/cpu/intel.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/pgtable.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/thread_info.h>
#include <linux/init.h>
#include <linux/uaccess.h>

#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/intel-family.h>
#include <asm/microcode_intel.h>
#include <asm/hwcap2.h>
#include <asm/elf.h>
#include <asm/cpu_device_id.h>
#include <asm/cmdline.h>
#include <asm/traps.h>
#include <asm/resctrl.h>

#ifdef CONFIG_X86_64
#include <linux/topology.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#endif

enum split_lock_detect_state {
	sld_off = 0,
	sld_warn,
	sld_fatal,
};

/*
 * Default to sld_off because most systems do not support split lock
 * detection. split_lock_setup() will switch this to sld_warn on systems
 * that support split lock detection, unless there is a command line override.
 */
static enum split_lock_detect_state sld_state __ro_after_init = sld_off;
static u64 msr_test_ctrl_cache __ro_after_init;

/*
 * Processors which have self-snooping capability can handle conflicting
 * memory types across CPUs by snooping their own cache. However, there
 * exist CPU models in which having conflicting memory types still leads
 * to unpredictable behavior, machine check errors, or hangs. Clear this
 * feature to prevent its use on machines with known errata.
 */
static void check_memory_type_self_snoop_errata(struct cpuinfo_x86 *c)
{
	switch (c->x86_model) {
	case INTEL_FAM6_CORE_YONAH:
	case INTEL_FAM6_CORE2_MEROM:
	case INTEL_FAM6_CORE2_MEROM_L:
	case INTEL_FAM6_CORE2_PENRYN:
	case INTEL_FAM6_CORE2_DUNNINGTON:
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_NEHALEM_G:
	case INTEL_FAM6_NEHALEM_EP:
	case INTEL_FAM6_NEHALEM_EX:
	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_WESTMERE_EP:
	case INTEL_FAM6_SANDYBRIDGE:
		setup_clear_cpu_cap(X86_FEATURE_SELFSNOOP);
	}
}

static bool ring3mwait_disabled __read_mostly;

static int __init ring3mwait_disable(char *__unused)
{
	ring3mwait_disabled = true;
	return 0;
}
__setup("ring3mwait=disable", ring3mwait_disable);

static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
{
	/*
	 * Ring 3 MONITOR/MWAIT feature cannot be detected without
	 * cpu model and family comparison.
	 */
	if (c->x86 != 6)
		return;
	switch (c->x86_model) {
	case INTEL_FAM6_XEON_PHI_KNL:
	case INTEL_FAM6_XEON_PHI_KNM:
		break;
	default:
		return;
	}

	if (ring3mwait_disabled)
		return;

	set_cpu_cap(c, X86_FEATURE_RING3MWAIT);
	this_cpu_or(msr_misc_features_shadow,
		    1UL << MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT);

	if (c == &boot_cpu_data)
		ELF_HWCAP2 |= HWCAP2_RING3MWAIT;
}

/*
 * Early microcode releases for the Spectre v2 mitigation were broken.
 * Information taken from:
 * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
 * - https://kb.vmware.com/s/article/52345
 * - Microcode revisions observed in the wild
 * - Release note from 20180108 microcode release
 */
struct sku_microcode {
	u8 model;
	u8 stepping;
	u32 microcode;
};
static const struct sku_microcode spectre_bad_microcodes[] = {
	{ INTEL_FAM6_KABYLAKE,		0x0B,	0x80 },
	{ INTEL_FAM6_KABYLAKE,		0x0A,	0x80 },
	{ INTEL_FAM6_KABYLAKE,		0x09,	0x80 },
	{ INTEL_FAM6_KABYLAKE_L,	0x0A,	0x80 },
	{ INTEL_FAM6_KABYLAKE_L,	0x09,	0x80 },
	{ INTEL_FAM6_SKYLAKE_X,		0x03,	0x0100013e },
	{ INTEL_FAM6_SKYLAKE_X,		0x04,	0x0200003c },
	{ INTEL_FAM6_BROADWELL,		0x04,	0x28 },
	{ INTEL_FAM6_BROADWELL_G,	0x01,	0x1b },
	{ INTEL_FAM6_BROADWELL_D,	0x02,	0x14 },
	{ INTEL_FAM6_BROADWELL_D,	0x03,	0x07000011 },
	{ INTEL_FAM6_BROADWELL_X,	0x01,	0x0b000025 },
	{ INTEL_FAM6_HASWELL_L,		0x01,	0x21 },
	{ INTEL_FAM6_HASWELL_G,		0x01,	0x18 },
	{ INTEL_FAM6_HASWELL,		0x03,	0x23 },
	{ INTEL_FAM6_HASWELL_X,		0x02,	0x3b },
	{ INTEL_FAM6_HASWELL_X,		0x04,	0x10 },
	{ INTEL_FAM6_IVYBRIDGE_X,	0x04,	0x42a },
	/* Observed in the wild */
	{ INTEL_FAM6_SANDYBRIDGE_X,	0x06,	0x61b },
	{ INTEL_FAM6_SANDYBRIDGE_X,	0x07,	0x712 },
};

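/*
 * Check whether the microcode revision running on this CPU is one of the
 * known-bad Spectre v2 releases listed in spectre_bad_microcodes[] above.
 */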
static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
{
	int i;

	/*
	 * We know that hypervisors lie to us about the microcode version,
	 * so we may as well hope that the CPU is running the correct version.
	 */
	if (cpu_has(c, X86_FEATURE_HYPERVISOR))
		return false;

	if (c->x86 != 6)
		return false;

	for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
		if (c->x86_model == spectre_bad_microcodes[i].model &&
		    c->x86_stepping == spectre_bad_microcodes[i].stepping)
			return (c->microcode <= spectre_bad_microcodes[i].microcode);
	}
	return false;
}

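/*
 * Early Intel specific CPU setup and quirk handling; invoked via the
 * ->c_early_init callback and invoked again from init_intel() on each CPU.
 */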
static void early_init_intel(struct cpuinfo_x86 *c)
{
	u64 misc_enable;

	/* Unmask CPUID levels if masked: */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
				  MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) {
			c->cpuid_level = cpuid_eax(0);
			get_cpu_cap(c);
		}
	}

	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
		c->microcode = intel_get_microcode_revision();

	/* Now if any of them are set, check the blacklist and clear the lot */
	if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) ||
	     cpu_has(c, X86_FEATURE_INTEL_STIBP) ||
	     cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) ||
	     cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) {
		pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n");
		setup_clear_cpu_cap(X86_FEATURE_IBRS);
		setup_clear_cpu_cap(X86_FEATURE_IBPB);
		setup_clear_cpu_cap(X86_FEATURE_STIBP);
		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
		setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
		setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
		setup_clear_cpu_cap(X86_FEATURE_SSBD);
		setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
	}

	/*
	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
	 *
	 * A race condition between speculative fetches and invalidating
	 * a large page. This is worked around in microcode, but we
	 * need the microcode to have already been loaded... so if it is
	 * not, recommend a BIOS update and disable large pages.
	 */
	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 &&
	    c->microcode < 0x20e) {
		pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
		clear_cpu_cap(c, X86_FEATURE_PSE);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif

	/* CPUID workaround for 0F33/0F34 CPU */
	if (c->x86 == 0xF && c->x86_model == 0x3
	    && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
		c->x86_phys_bits = 36;

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states.
	 *
	 * It is also reliable across cores and sockets. (but not across
	 * cabinets - we turn it off in that case explicitly.)
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
	if (c->x86 == 6) {
		switch (c->x86_model) {
		case INTEL_FAM6_ATOM_SALTWELL_MID:
		case INTEL_FAM6_ATOM_SALTWELL_TABLET:
		case INTEL_FAM6_ATOM_SILVERMONT_MID:
		case INTEL_FAM6_ATOM_AIRMONT_NP:
			set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
			break;
		default:
			break;
		}
	}

	/*
	 * There is a known erratum on Pentium III and Core Solo
	 * and Core Duo CPUs.
	 * " Page with PAT set to WC while associated MTRR is UC
	 *   may consolidate to UC "
	 * Because of this erratum, it is better to stick with
	 * setting WC in MTRR rather than using PAT on these CPUs.
	 *
	 * Enable PAT WC only on P4, Core 2 or later CPUs.
	 */
	if (c->x86 == 6 && c->x86_model < 15)
		clear_cpu_cap(c, X86_FEATURE_PAT);

	/*
	 * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
	 * clear the fast string and enhanced fast string CPU capabilities.
	 */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
			pr_info("Disabled fast string operations\n");
			setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
			setup_clear_cpu_cap(X86_FEATURE_ERMS);
		}
	}

	/*
	 * Intel Quark Core DevMan_001.pdf section 6.4.11
	 * "The operating system also is required to invalidate (i.e., flush)
	 *  the TLB when any changes are made to any of the page table entries.
	 *  The operating system must reload CR3 to cause the TLB to be flushed"
	 *
	 * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h
	 * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
	 * to be modified.
	 */
	if (c->x86 == 5 && c->x86_model == 9) {
		pr_info("Disabling PGE capability bit\n");
		setup_clear_cpu_cap(X86_FEATURE_PGE);
	}

	if (c->cpuid_level >= 0x00000001) {
		u32 eax, ebx, ecx, edx;

		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
		/*
		 * If HTT (EDX[28]) is set EBX[16:23] contain the number of
		 * apicids which are reserved per package. Store the resulting
		 * shift value for the package management code.
		 */
		if (edx & (1U << 28))
			c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
	}

	check_memory_type_self_snoop_errata(c);

	/*
	 * Get the number of SMT siblings early from the extended topology
	 * leaf, if available. Otherwise try the legacy SMT detection.
	 */
	if (detect_extended_topology_early(c) < 0)
		detect_ht_early(c);
}

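/* Boot CPU only setup, invoked once via the ->c_bsp_init callback. */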
static void bsp_init_intel(struct cpuinfo_x86 *c)
{
	resctrl_cpu_detect(c);
}

#ifdef CONFIG_X86_32
/*
 * Early probe support logic for ppro memory erratum #50
 *
 * This is called before we do cpu ident work
 */

int ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_stepping < 8) {
		pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}

static void intel_smp_check(struct cpuinfo_x86 *c)
{
	/* Called from identify_secondary_cpu()? Nothing to do for the boot CPU. */
	if (!c->cpu_index)
		return;

	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86 == 5 &&
	    c->x86_stepping >= 1 && c->x86_stepping <= 4 &&
	    c->x86_model <= 3) {
		/*
		 * Remember we have B step Pentia with bugs
		 */
		WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
			     "with B stepping processors.\n");
	}
}

static int forcepae;
static int __init forcepae_setup(char *__unused)
{
	forcepae = 1;
	return 1;
}
__setup("forcepae", forcepae_setup);

static void intel_workarounds(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system. Announce that the fault handler will be checking for it.
	 * The Quark is also family 5, but does not have the same bug.
	 */
	clear_cpu_bug(c, X86_BUG_F00F);
	if (c->x86 == 5 && c->x86_model < 9) {
		static int f00f_workaround_enabled;

		set_cpu_bug(c, X86_BUG_F00F);
		if (!f00f_workaround_enabled) {
			pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3 mask 3
	 */
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);

	/*
	 * PAE CPUID issue: many Pentium M report no PAE but may have a
	 * functionally usable PAE implementation.
	 * Forcefully enable PAE if kernel parameter "forcepae" is present.
	 */
	if (forcepae) {
		pr_warn("PAE forced!\n");
		set_cpu_cap(c, X86_FEATURE_PAE);
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
	}

	/*
	 * P4 Xeon erratum 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 */
	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) {
		if (msr_set_bit(MSR_IA32_MISC_ENABLE,
				MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
			pr_info("CPU: C0 stepping P4 Xeon detected.\n");
			pr_info("CPU: Disabling hardware prefetching (Erratum 037)\n");
		}
	}

	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update").
	 */
	if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
	    (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
		set_cpu_bug(c, X86_BUG_11AP);

#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

	intel_smp_check(c);
}
#else
static void intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif

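/*
 * Bind the CPU to its NUMA node as reported by numa_cpu_node(), falling back
 * to the value from init_cpu_to_node() when that node is unknown or offline.
 */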
static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();

	/*
	 * Don't do the funky fallback heuristics the AMD version employs
	 * for now.
	 */
	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE || !node_online(node)) {
		/* reuse the value from init_cpu_to_node() */
		node = cpu_to_node(cpu);
	}
	numa_set_node(cpu, node);
#endif
}

#define MSR_IA32_TME_ACTIVATE		0x982

/* Helpers to access TME_ACTIVATE MSR */
#define TME_ACTIVATE_LOCKED(x)		(x & 0x1)
#define TME_ACTIVATE_ENABLED(x)		(x & 0x2)

#define TME_ACTIVATE_POLICY(x)		((x >> 4) & 0xf)	/* Bits 7:4 */
#define TME_ACTIVATE_POLICY_AES_XTS_128	0

#define TME_ACTIVATE_KEYID_BITS(x)	((x >> 32) & 0xf)	/* Bits 35:32 */

#define TME_ACTIVATE_CRYPTO_ALGS(x)	((x >> 48) & 0xffff)	/* Bits 63:48 */
#define TME_ACTIVATE_CRYPTO_AES_XTS_128	1

/* Values for mktme_status (SW only construct) */
#define MKTME_ENABLED			0
#define MKTME_DISABLED			1
#define MKTME_UNINITIALIZED		2
static int mktme_status = MKTME_UNINITIALIZED;

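/*
 * Read MSR_IA32_TME_ACTIVATE, report the TME/MKTME configuration and
 * subtract the KeyID bits from the usable physical address bits.
 */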
static void detect_tme(struct cpuinfo_x86 *c)
{
	u64 tme_activate, tme_policy, tme_crypto_algs;
	int keyid_bits = 0, nr_keyids = 0;
	static u64 tme_activate_cpu0 = 0;

	rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);

	if (mktme_status != MKTME_UNINITIALIZED) {
		if (tme_activate != tme_activate_cpu0) {
			/* Broken BIOS? */
			pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
			pr_err_once("x86/tme: MKTME is not usable\n");
			mktme_status = MKTME_DISABLED;

			/* Proceed. We may need to exclude bits from x86_phys_bits. */
		}
	} else {
		tme_activate_cpu0 = tme_activate;
	}

	if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
		pr_info_once("x86/tme: not enabled by BIOS\n");
		mktme_status = MKTME_DISABLED;
		return;
	}

	if (mktme_status != MKTME_UNINITIALIZED)
		goto detect_keyid_bits;

	pr_info("x86/tme: enabled by BIOS\n");

	tme_policy = TME_ACTIVATE_POLICY(tme_activate);
	if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
		pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);

	tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
	if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
		pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
		       tme_crypto_algs);
		mktme_status = MKTME_DISABLED;
	}
detect_keyid_bits:
	keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
	nr_keyids = (1UL << keyid_bits) - 1;
	if (nr_keyids) {
		pr_info_once("x86/mktme: enabled by BIOS\n");
		pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
	} else {
		pr_info_once("x86/mktme: disabled by BIOS\n");
	}

	if (mktme_status == MKTME_UNINITIALIZED) {
		/* MKTME is usable */
		mktme_status = MKTME_ENABLED;
	}

	/*
	 * KeyID bits effectively lower the number of physical address
	 * bits. Update cpuinfo_x86::x86_phys_bits accordingly.
	 */
	c->x86_phys_bits -= keyid_bits;
}

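/* Set X86_FEATURE_CPUID_FAULT if MSR_PLATFORM_INFO advertises CPUID faulting. */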
static void init_cpuid_fault(struct cpuinfo_x86 *c)
{
	u64 msr;

	if (!rdmsrl_safe(MSR_PLATFORM_INFO, &msr)) {
		if (msr & MSR_PLATFORM_INFO_CPUID_FAULT)
			set_cpu_cap(c, X86_FEATURE_CPUID_FAULT);
	}
}

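/*
 * Reset the MSR_MISC_FEATURES_ENABLES shadow, probe CPUID faulting and
 * ring 3 MWAIT support, then write the accumulated bits back to the MSR.
 */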
static void init_intel_misc_features(struct cpuinfo_x86 *c)
{
	u64 msr;

	if (rdmsrl_safe(MSR_MISC_FEATURES_ENABLES, &msr))
		return;

	/* Clear all MISC features */
	this_cpu_write(msr_misc_features_shadow, 0);

	/* Check features and update capabilities and shadow control bits */
	init_cpuid_fault(c);
	probe_xeon_phi_r3mwait(c);

	msr = this_cpu_read(msr_misc_features_shadow);
	wrmsrl(MSR_MISC_FEATURES_ENABLES, msr);
}

static void split_lock_init(void);

static void init_intel(struct cpuinfo_x86 *c)
{
	early_init_intel(c);

	intel_workarounds(c);

	/*
	 * Detect the extended topology information if available. This
	 * will reinitialise the initial_apicid which will be used
	 * in init_intel_cacheinfo()
	 */
	detect_extended_topology(c);

	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
		/*
		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
		 * detection.
		 */
		detect_num_cpu_cores(c);
#ifdef CONFIG_X86_32
		detect_ht(c);
#endif
	}

	init_intel_cacheinfo(c);

	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (cpu_has(c, X86_FEATURE_XMM2))
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);

	if (boot_cpu_has(X86_FEATURE_DS)) {
		unsigned int l1, l2;

		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1<<11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & (1<<12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}

	if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_CLFLUSH) &&
	    (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
		set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);

	if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_MWAIT) &&
	    ((c->x86_model == INTEL_FAM6_ATOM_GOLDMONT)))
		set_cpu_bug(c, X86_BUG_MONITOR);

#ifdef CONFIG_X86_64
	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else
	/*
	 * Names for the Pentium II/Celeron processors
	 * detectable only by also checking the cache size.
	 * Dixon is NOT a Celeron.
	 */
	if (c->x86 == 6) {
		unsigned int l2 = c->x86_cache_size;
		char *p = NULL;

		switch (c->x86_model) {
		case 5:
			if (l2 == 0)
				p = "Celeron (Covington)";
			else if (l2 == 256)
				p = "Mobile Pentium II (Dixon)";
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_stepping == 0 || c->x86_stepping == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}

		if (p)
			strcpy(c->x86_model_id, p);
	}

	if (c->x86 == 15)
		set_cpu_cap(c, X86_FEATURE_P4);
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_P3);
#endif

	/* Work around errata */
	srat_detect_node(c);

	init_ia32_feat_ctl(c);

	if (cpu_has(c, X86_FEATURE_TME))
		detect_tme(c);

	init_intel_misc_features(c);

	if (tsx_ctrl_state == TSX_CTRL_ENABLE)
		tsx_enable();
	if (tsx_ctrl_state == TSX_CTRL_DISABLE)
		tsx_disable();

	split_lock_init();
}

#ifdef CONFIG_X86_32
static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kb of cache, the other 512. We have no way
	 * to determine which, so we use a boottime override
	 * for the 512kb model, and assume 256 otherwise.
	 */
	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
		size = 256;

	/*
	 * Intel Quark SoC X1000 contains a 4-way set associative
	 * 16K cache with a 16 byte cache line and 256 lines per tag
	 */
	if ((c->x86 == 5) && (c->x86_model == 9))
		size = 16;
	return size;
}
#endif

#define TLB_INST_4K	0x01
#define TLB_INST_4M	0x02
#define TLB_INST_2M_4M	0x03

#define TLB_INST_ALL	0x05
#define TLB_INST_1G	0x06

#define TLB_DATA_4K	0x11
#define TLB_DATA_4M	0x12
#define TLB_DATA_2M_4M	0x13
#define TLB_DATA_4K_4M	0x14

#define TLB_DATA_1G	0x16

#define TLB_DATA0_4K	0x21
#define TLB_DATA0_4M	0x22
#define TLB_DATA0_2M_4M	0x23

#define STLB_4K		0x41
#define STLB_4K_2M	0x42

static const struct _tlb_table intel_tlb_table[] = {
	{ 0x01, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0x02, TLB_INST_4M,		2,	" TLB_INST 4 MByte pages, full associative" },
	{ 0x03, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0x04, TLB_DATA_4M,		8,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x05, TLB_DATA_4M,		32,	" TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x0b, TLB_INST_4M,		4,	" TLB_INST 4 MByte pages, 4-way set associative" },
	{ 0x4f, TLB_INST_4K,		32,	" TLB_INST 4 KByte pages" },
	{ 0x50, TLB_INST_ALL,		64,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x51, TLB_INST_ALL,		128,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x52, TLB_INST_ALL,		256,	" TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x55, TLB_INST_2M_4M,		7,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0x56, TLB_DATA0_4M,		16,	" TLB_DATA0 4 MByte pages, 4-way set associative" },
	{ 0x57, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, 4-way associative" },
	{ 0x59, TLB_DATA0_4K,		16,	" TLB_DATA0 4 KByte pages, fully associative" },
	{ 0x5a, TLB_DATA0_2M_4M,	32,	" TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
	{ 0x5b, TLB_DATA_4K_4M,		64,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5c, TLB_DATA_4K_4M,		128,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5d, TLB_DATA_4K_4M,		256,	" TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x61, TLB_INST_4K,		48,	" TLB_INST 4 KByte pages, full associative" },
	{ 0x63, TLB_DATA_1G,		4,	" TLB_DATA 1 GByte pages, 4-way set associative" },
	{ 0x6b, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 8-way associative" },
	{ 0x6c, TLB_DATA_2M_4M,		128,	" TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" },
	{ 0x6d, TLB_DATA_1G,		16,	" TLB_DATA 1 GByte pages, fully associative" },
	{ 0x76, TLB_INST_2M_4M,		8,	" TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0xb0, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0xb1, TLB_INST_2M_4M,		4,	" TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
	{ 0xb2, TLB_INST_4K,		64,	" TLB_INST 4KByte pages, 4-way set associative" },
	{ 0xb3, TLB_DATA_4K,		128,	" TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0xb4, TLB_DATA_4K,		256,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xb5, TLB_INST_4K,		64,	" TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xb6, TLB_INST_4K,		128,	" TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xba, TLB_DATA_4K,		64,	" TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xc0, TLB_DATA_4K_4M,		8,	" TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
	{ 0xc1, STLB_4K_2M,		1024,	" STLB 4 KByte and 2 MByte pages, 8-way associative" },
	{ 0xc2, TLB_DATA_2M_4M,		16,	" TLB_DATA 2 MByte/4MByte pages, 4-way associative" },
	{ 0xca, STLB_4K,		512,	" STLB 4 KByte pages, 4-way associative" },
	{ 0x00, 0, 0 }
};

static void intel_tlb_lookup(const unsigned char desc)
{
	unsigned char k;

	if (desc == 0)
		return;

	/* look up this descriptor in the table */
	for (k = 0; intel_tlb_table[k].descriptor != desc &&
	     intel_tlb_table[k].descriptor != 0; k++)
		;

	if (intel_tlb_table[k].tlb_type == 0)
		return;

	switch (intel_tlb_table[k].tlb_type) {
	case STLB_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case STLB_4K_2M:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_ALL:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4M:
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_2M_4M:
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K:
	case TLB_DATA0_4K:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4M:
	case TLB_DATA0_4M:
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_2M_4M:
	case TLB_DATA0_2M_4M:
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K_4M:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_1G:
		if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
		break;
	}
}

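/* Walk the CPUID leaf 0x2 descriptor bytes and record the reported TLB sizes. */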
static void intel_detect_tlb(struct cpuinfo_x86 *c)
{
	int i, j, n;
	unsigned int regs[4];
	unsigned char *desc = (unsigned char *)regs;

	if (c->cpuid_level < 2)
		return;

	/* Number of times to iterate */
	n = cpuid_eax(2) & 0xFF;

	for (i = 0 ; i < n ; i++) {
		cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

		/* If bit 31 is set, this is an unknown format */
		for (j = 0 ; j < 3 ; j++)
			if (regs[j] & (1 << 31))
				regs[j] = 0;

		/* Byte 0 is level count, not a descriptor */
		for (j = 1 ; j < 16 ; j++)
			intel_tlb_lookup(desc[j]);
	}
}

static const struct cpu_dev intel_cpu_dev = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
			  [1] = "486 DX-50",
			  [2] = "486 SX",
			  [3] = "486 DX/2",
			  [4] = "486 SL",
			  [5] = "486 SX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB"
		  }
		},
		{ .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [4] = "Pentium MMX",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX",
			  [9] = "Quark SoC X1000",
		  }
		},
		{ .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [1] = "Pentium Pro",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.legacy_cache_size = intel_size_cache,
#endif
	.c_detect_tlb	= intel_detect_tlb,
	.c_early_init	= early_init_intel,
	.c_bsp_init	= bsp_init_intel,
	.c_init		= init_intel,
	.c_x86_vendor	= X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);

#undef pr_fmt
#define pr_fmt(fmt) "x86/split lock detection: " fmt

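/*
 * Accepted values for the "split_lock_detect=" kernel command line parameter;
 * split_lock_setup() matches the boot argument against this table.
 */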
static const struct {
	const char			*option;
	enum split_lock_detect_state	state;
} sld_options[] __initconst = {
	{ "off",	sld_off   },
	{ "warn",	sld_warn  },
	{ "fatal",	sld_fatal },
};

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}

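/*
 * Write the requested split lock detect state to MSR_TEST_CTRL and read it
 * back; returns false if the MSR is inaccessible or the write did not stick.
 */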
static bool split_lock_verify_msr(bool on)
{
	u64 ctrl, tmp;

	if (rdmsrl_safe(MSR_TEST_CTRL, &ctrl))
		return false;
	if (on)
		ctrl |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
	else
		ctrl &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
	if (wrmsrl_safe(MSR_TEST_CTRL, ctrl))
		return false;
	rdmsrl(MSR_TEST_CTRL, tmp);
	return ctrl == tmp;
}

static void __init split_lock_setup(void)
{
	enum split_lock_detect_state state = sld_warn;
	char arg[20];
	int i, ret;

	if (!split_lock_verify_msr(false)) {
		pr_info("MSR access failed: Disabled\n");
		return;
	}

	ret = cmdline_find_option(boot_command_line, "split_lock_detect",
				  arg, sizeof(arg));
	if (ret >= 0) {
		for (i = 0; i < ARRAY_SIZE(sld_options); i++) {
			if (match_option(arg, ret, sld_options[i].option)) {
				state = sld_options[i].state;
				break;
			}
		}
	}

	switch (state) {
	case sld_off:
		pr_info("disabled\n");
		return;
	case sld_warn:
		pr_info("warning about user-space split_locks\n");
		break;
	case sld_fatal:
		pr_info("sending SIGBUS on user-space split_locks\n");
		break;
	}

	rdmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache);

	if (!split_lock_verify_msr(true)) {
		pr_info("MSR access failed: Disabled\n");
		return;
	}

	sld_state = state;
	setup_force_cpu_cap(X86_FEATURE_SPLIT_LOCK_DETECT);
}

/*
 * MSR_TEST_CTRL is per core, but we treat it like a per CPU MSR. Locking
 * is not implemented as one thread could undo the setting of the other
 * thread immediately after dropping the lock anyway.
 */
static void sld_update_msr(bool on)
{
	u64 test_ctrl_val = msr_test_ctrl_cache;

	if (on)
		test_ctrl_val |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;

	wrmsrl(MSR_TEST_CTRL, test_ctrl_val);
}

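/* Per-CPU initialization: program MSR_TEST_CTRL according to the chosen sld_state. */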
static void split_lock_init(void)
{
	split_lock_verify_msr(sld_state != sld_off);
}

static void split_lock_warn(unsigned long ip)
{
	pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n",
			    current->comm, current->pid, ip);

	/*
	 * Disable the split lock detection for this task so it can make
	 * progress and set TIF_SLD so the detection is re-enabled via
	 * switch_to_sld() when the task is scheduled out.
	 */
	sld_update_msr(false);
	set_tsk_thread_flag(current, TIF_SLD);
}

bool handle_guest_split_lock(unsigned long ip)
{
	if (sld_state == sld_warn) {
		split_lock_warn(ip);
		return true;
	}

	pr_warn_once("#AC: %s/%d %s split_lock trap at address: 0x%lx\n",
		     current->comm, current->pid,
		     sld_state == sld_fatal ? "fatal" : "bogus", ip);

	current->thread.error_code = 0;
	current->thread.trap_nr = X86_TRAP_AC;
	force_sig_fault(SIGBUS, BUS_ADRALN, NULL);
	return false;
}
EXPORT_SYMBOL_GPL(handle_guest_split_lock);

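/*
 * #AC handler for a user space split lock: returns false (let the #AC be
 * fatal) when user space enabled alignment checking or sld_fatal is selected,
 * otherwise warns (ratelimited) and disables detection for the current task.
 */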
bool handle_user_split_lock(struct pt_regs *regs, long error_code)
{
	if ((regs->flags & X86_EFLAGS_AC) || sld_state == sld_fatal)
		return false;
	split_lock_warn(regs->ip);
	return true;
}

/*
 * This function is called only when switching between tasks with
 * different split-lock detection modes. It sets the MSR for the
 * mode of the new task. This is right most of the time, but since
 * the MSR is shared by hyperthreads on a physical core there can
 * be glitches when the two threads need different modes.
 */
void switch_to_sld(unsigned long tifn)
{
	sld_update_msr(!(tifn & _TIF_SLD));
}

/*
 * Bits in the IA32_CORE_CAPABILITIES are not architectural, so they should
 * only be trusted if it is confirmed that a CPU model implements a
 * specific feature at a particular bit position.
 *
 * The possible driver data field values:
 *
 * - 0: CPU models that are known to have the per-core split-lock detection
 *	feature even though they do not enumerate IA32_CORE_CAPABILITIES.
 *
 * - 1: CPU models which may enumerate IA32_CORE_CAPABILITIES and if so use
 *	bit 5 to enumerate the per-core split-lock detection feature.
 */
static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,		0),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L,		0),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D,		0),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT,	1),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D,	1),
	X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L,	1),
	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,		1),
	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,		1),
	{}
};

void __init cpu_set_core_cap_bits(struct cpuinfo_x86 *c)
{
	const struct x86_cpu_id *m;
	u64 ia32_core_caps;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;

	m = x86_match_cpu(split_lock_cpu_ids);
	if (!m)
		return;

	switch (m->driver_data) {
	case 0:
		break;
	case 1:
		if (!cpu_has(c, X86_FEATURE_CORE_CAPABILITIES))
			return;
		rdmsrl(MSR_IA32_CORE_CAPS, ia32_core_caps);
		if (!(ia32_core_caps & MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT))
			return;
		break;
	default:
		return;
	}

	split_lock_setup();
}
6650cdd9 1181}