// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/thread_info.h>
#include <linux/init.h>
#include <linux/uaccess.h>

#include <asm/cpufeature.h>
#include <asm/pgtable.h>
#include <asm/msr.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/intel-family.h>
#include <asm/microcode_intel.h>
#include <asm/hwcap2.h>
#include <asm/elf.h>

#ifdef CONFIG_X86_64
#include <linux/topology.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#endif

/*
 * Just in case our CPU detection goes bad, or you have a weird system,
 * allow a way to override the automatic disabling of MPX.
 */
static int forcempx;

static int __init forcempx_setup(char *__unused)
{
        forcempx = 1;

        return 1;
}
__setup("intel-skd-046-workaround=disable", forcempx_setup);

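/*
 * check_mpx_erratum() - clear X86_FEATURE_MPX on CPUs that advertise MPX
 * but lack SMEP, unless the command line override above was given.
 */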
void check_mpx_erratum(struct cpuinfo_x86 *c)
{
        if (forcempx)
                return;
        /*
         * Turn off the MPX feature on CPUs where SMEP is not
         * available or disabled.
         *
         * Works around Intel Erratum SKD046: "Branch Instructions
         * May Initialize MPX Bound Registers Incorrectly".
         *
         * This might falsely disable MPX on systems without
         * SMEP, like Atom processors without SMEP. But there
         * is no such hardware known at the moment.
         */
        if (cpu_has(c, X86_FEATURE_MPX) && !cpu_has(c, X86_FEATURE_SMEP)) {
                setup_clear_cpu_cap(X86_FEATURE_MPX);
                pr_warn("x86/mpx: Disabling MPX since SMEP not present\n");
        }
}

static bool ring3mwait_disabled __read_mostly;

static int __init ring3mwait_disable(char *__unused)
{
        ring3mwait_disabled = true;
        return 0;
}
__setup("ring3mwait=disable", ring3mwait_disable);

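/*
 * probe_xeon_phi_r3mwait() - enable ring 3 MONITOR/MWAIT on Xeon Phi
 * (Knights Landing / Knights Mill) unless "ring3mwait=disable" was given
 * on the command line, and advertise it via ELF_HWCAP2 on the boot CPU.
 */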
static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
{
        /*
         * Ring 3 MONITOR/MWAIT feature cannot be detected without
         * cpu model and family comparison.
         */
        if (c->x86 != 6)
                return;
        switch (c->x86_model) {
        case INTEL_FAM6_XEON_PHI_KNL:
        case INTEL_FAM6_XEON_PHI_KNM:
                break;
        default:
                return;
        }

        if (ring3mwait_disabled)
                return;

        set_cpu_cap(c, X86_FEATURE_RING3MWAIT);
        this_cpu_or(msr_misc_features_shadow,
                    1UL << MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT);

        if (c == &boot_cpu_data)
                ELF_HWCAP2 |= HWCAP2_RING3MWAIT;
}

/*
 * Early microcode releases for the Spectre v2 mitigation were broken.
 * Information taken from:
 * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/01/microcode-update-guidance.pdf
 * - https://kb.vmware.com/s/article/52345
 * - Microcode revisions observed in the wild
 * - Release note from 20180108 microcode release
 */
struct sku_microcode {
        u8 model;
        u8 stepping;
        u32 microcode;
};
static const struct sku_microcode spectre_bad_microcodes[] = {
        { INTEL_FAM6_KABYLAKE_DESKTOP,  0x0B,   0x80 },
        { INTEL_FAM6_KABYLAKE_DESKTOP,  0x0A,   0x80 },
        { INTEL_FAM6_KABYLAKE_DESKTOP,  0x09,   0x80 },
        { INTEL_FAM6_KABYLAKE_MOBILE,   0x0A,   0x80 },
        { INTEL_FAM6_KABYLAKE_MOBILE,   0x09,   0x80 },
        { INTEL_FAM6_SKYLAKE_X,         0x03,   0x0100013e },
        { INTEL_FAM6_SKYLAKE_X,         0x04,   0x0200003c },
        { INTEL_FAM6_SKYLAKE_DESKTOP,   0x03,   0xc2 },
        { INTEL_FAM6_BROADWELL_CORE,    0x04,   0x28 },
        { INTEL_FAM6_BROADWELL_GT3E,    0x01,   0x1b },
        { INTEL_FAM6_BROADWELL_XEON_D,  0x02,   0x14 },
        { INTEL_FAM6_BROADWELL_XEON_D,  0x03,   0x07000011 },
        { INTEL_FAM6_BROADWELL_X,       0x01,   0x0b000025 },
        { INTEL_FAM6_HASWELL_ULT,       0x01,   0x21 },
        { INTEL_FAM6_HASWELL_GT3E,      0x01,   0x18 },
        { INTEL_FAM6_HASWELL_CORE,      0x03,   0x23 },
        { INTEL_FAM6_HASWELL_X,         0x02,   0x3b },
        { INTEL_FAM6_HASWELL_X,         0x04,   0x10 },
        { INTEL_FAM6_IVYBRIDGE_X,       0x04,   0x42a },
        /* Observed in the wild */
        { INTEL_FAM6_SANDYBRIDGE_X,     0x06,   0x61b },
        { INTEL_FAM6_SANDYBRIDGE_X,     0x07,   0x712 },
};

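/*
 * Return true if the currently loaded microcode revision matches one of the
 * known-bad Spectre v2 releases above (same model and stepping, revision at
 * or below the listed one).
 */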
static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
                if (c->x86_model == spectre_bad_microcodes[i].model &&
                    c->x86_mask == spectre_bad_microcodes[i].stepping)
                        return (c->microcode <= spectre_bad_microcodes[i].microcode);
        }
        return false;
}

static void early_init_intel(struct cpuinfo_x86 *c)
{
        u64 misc_enable;

        /* Unmask CPUID levels if masked: */
        if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
                if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
                                  MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) {
                        c->cpuid_level = cpuid_eax(0);
                        get_cpu_cap(c);
                }
        }

        if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
            (c->x86 == 0x6 && c->x86_model >= 0x0e))
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

        if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
                c->microcode = intel_get_microcode_revision();

        /* Now if any of them are set, check the blacklist and clear the lot */
        if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) ||
             cpu_has(c, X86_FEATURE_INTEL_STIBP) ||
             cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) ||
             cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) {
                pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n");
                setup_clear_cpu_cap(X86_FEATURE_IBRS);
                setup_clear_cpu_cap(X86_FEATURE_IBPB);
                setup_clear_cpu_cap(X86_FEATURE_STIBP);
                setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
                setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
        }

        /*
         * Atom erratum AAE44/AAF40/AAG38/AAH41:
         *
         * A race condition between speculative fetches and invalidating
         * a large page. This is worked around in microcode, but we
         * need the microcode to have already been loaded... so if it is
         * not, recommend a BIOS update and disable large pages.
         */
        if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
            c->microcode < 0x20e) {
                pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
                clear_cpu_cap(c, X86_FEATURE_PSE);
        }

#ifdef CONFIG_X86_64
        set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
        /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
        if (c->x86 == 15 && c->x86_cache_alignment == 64)
                c->x86_cache_alignment = 128;
#endif

        /* CPUID workaround for 0F33/0F34 CPU */
        if (c->x86 == 0xF && c->x86_model == 0x3
            && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
                c->x86_phys_bits = 36;

        /*
         * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
         * with P/T states and does not stop in deep C-states.
         *
         * It is also reliable across cores and sockets. (but not across
         * cabinets - we turn it off in that case explicitly.)
         */
        if (c->x86_power & (1 << 8)) {
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
                set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
        }

        /* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
        if (c->x86 == 6) {
                switch (c->x86_model) {
                case 0x27:      /* Penwell */
                case 0x35:      /* Cloverview */
                case 0x4a:      /* Merrifield */
                        set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
                        break;
                default:
                        break;
                }
        }

        /*
         * There is a known erratum on Pentium III and Core Solo
         * and Core Duo CPUs.
         * " Page with PAT set to WC while associated MTRR is UC
         * may consolidate to UC "
         * Because of this erratum, it is better to stick with
         * setting WC in MTRR rather than using PAT on these CPUs.
         *
         * Enable PAT WC only on P4, Core 2 or later CPUs.
         */
        if (c->x86 == 6 && c->x86_model < 15)
                clear_cpu_cap(c, X86_FEATURE_PAT);

        /*
         * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
         * clear the fast string and enhanced fast string CPU capabilities.
         */
        if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
                rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
                if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
                        pr_info("Disabled fast string operations\n");
                        setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
                        setup_clear_cpu_cap(X86_FEATURE_ERMS);
                }
        }

        /*
         * Intel Quark Core DevMan_001.pdf section 6.4.11
         * "The operating system also is required to invalidate (i.e., flush)
         * the TLB when any changes are made to any of the page table entries.
         * The operating system must reload CR3 to cause the TLB to be flushed"
         *
         * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h
         * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
         * to be modified.
         */
        if (c->x86 == 5 && c->x86_model == 9) {
                pr_info("Disabling PGE capability bit\n");
                setup_clear_cpu_cap(X86_FEATURE_PGE);
        }

        if (c->cpuid_level >= 0x00000001) {
                u32 eax, ebx, ecx, edx;

                cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
                /*
                 * If HTT (EDX[28]) is set EBX[16:23] contain the number of
                 * apicids which are reserved per package. Store the resulting
                 * shift value for the package management code.
                 */
                if (edx & (1U << 28))
                        c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
        }

        check_mpx_erratum(c);
}

#ifdef CONFIG_X86_32
/*
 * Early probe support logic for ppro memory erratum #50
 *
 * This is called before we do cpu ident work
 */

int ppro_with_ram_bug(void)
{
        /* Uses data from early_cpu_detect now */
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
            boot_cpu_data.x86 == 6 &&
            boot_cpu_data.x86_model == 1 &&
            boot_cpu_data.x86_mask < 8) {
                pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
                return 1;
        }
        return 0;
}

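/*
 * intel_smp_check() - warn once when a secondary CPU turns out to be a
 * B-stepping Pentium, which is known to be unreliable in SMP operation.
 */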
static void intel_smp_check(struct cpuinfo_x86 *c)
{
        /* calling is from identify_secondary_cpu() ? */
        if (!c->cpu_index)
                return;

        /*
         * Mask B, Pentium, but not Pentium MMX
         */
        if (c->x86 == 5 &&
            c->x86_mask >= 1 && c->x86_mask <= 4 &&
            c->x86_model <= 3) {
                /*
                 * Remember we have B step Pentia with bugs
                 */
                WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
                          "with B stepping processors.\n");
        }
}

static int forcepae;
static int __init forcepae_setup(char *__unused)
{
        forcepae = 1;
        return 1;
}
__setup("forcepae", forcepae_setup);

static void intel_workarounds(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_F00F_BUG
        /*
         * All models of Pentium and Pentium with MMX technology CPUs
         * have the F0 0F bug, which lets nonprivileged users lock up the
         * system. Announce that the fault handler will be checking for it.
         * The Quark is also family 5, but does not have the same bug.
         */
        clear_cpu_bug(c, X86_BUG_F00F);
        if (c->x86 == 5 && c->x86_model < 9) {
                static int f00f_workaround_enabled;

                set_cpu_bug(c, X86_BUG_F00F);
                if (!f00f_workaround_enabled) {
                        pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n");
                        f00f_workaround_enabled = 1;
                }
        }
#endif

        /*
         * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
         * model 3 mask 3
         */
        if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
                clear_cpu_cap(c, X86_FEATURE_SEP);

        /*
         * PAE CPUID issue: many Pentium M report no PAE but may have a
         * functionally usable PAE implementation.
         * Forcefully enable PAE if kernel parameter "forcepae" is present.
         */
        if (forcepae) {
                pr_warn("PAE forced!\n");
                set_cpu_cap(c, X86_FEATURE_PAE);
                add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
        }

        /*
         * P4 Xeon erratum 037 workaround.
         * Hardware prefetcher may cause stale data to be loaded into the cache.
         */
        if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
                if (msr_set_bit(MSR_IA32_MISC_ENABLE,
                                MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
                        pr_info("CPU: C0 stepping P4 Xeon detected.\n");
                        pr_info("CPU: Disabling hardware prefetching (Erratum 037)\n");
                }
        }

        /*
         * See if we have a good local APIC by checking for buggy Pentia,
         * i.e. all B steppings and the C2 stepping of P54C when using their
         * integrated APIC (see 11AP erratum in "Pentium Processor
         * Specification Update").
         */
        if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
            (c->x86_mask < 0x6 || c->x86_mask == 0xb))
                set_cpu_bug(c, X86_BUG_11AP);

#ifdef CONFIG_X86_INTEL_USERCOPY
        /*
         * Set up the preferred alignment for movsl bulk memory moves
         */
        switch (c->x86) {
        case 4:         /* 486: untested */
                break;
        case 5:         /* Old Pentia: untested */
                break;
        case 6:         /* PII/PIII only like movsl with 8-byte alignment */
                movsl_mask.mask = 7;
                break;
        case 15:        /* P4 is OK down to 8-byte alignment */
                movsl_mask.mask = 7;
                break;
        }
#endif

        intel_smp_check(c);
}
#else
static void intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif

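/*
 * srat_detect_node() - bind this CPU to the NUMA node reported by ACPI/SRAT,
 * falling back to the value set up earlier by init_cpu_to_node().
 */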
static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
        unsigned node;
        int cpu = smp_processor_id();

        /* Don't do the funky fallback heuristics the AMD version employs
           for now. */
        node = numa_cpu_node(cpu);
        if (node == NUMA_NO_NODE || !node_online(node)) {
                /* reuse the value from init_cpu_to_node() */
                node = cpu_to_node(cpu);
        }
        numa_set_node(cpu, node);
#endif
}

/*
 * find out the number of processor cores on the die
 */
static int intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
        unsigned int eax, ebx, ecx, edx;

        if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4)
                return 1;

        /* Intel has a non-standard dependency on %ecx for this CPUID level. */
        cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
        if (eax & 0x1f)
                return (eax >> 26) + 1;
        else
                return 1;
}

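/*
 * detect_vmx_virtcap() - read the VMX capability MSRs and set the feature
 * bits (TPR shadow, virtual NMIs, FlexPriority, EPT, VPID) they advertise.
 */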
static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
{
        /* Intel VMX MSR indicated features */
#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW    0x00200000
#define X86_VMX_FEATURE_PROC_CTLS_VNMI          0x00400000
#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS      0x80000000
#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC    0x00000001
#define X86_VMX_FEATURE_PROC_CTLS2_EPT          0x00000002
#define X86_VMX_FEATURE_PROC_CTLS2_VPID         0x00000020

        u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;

        clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
        clear_cpu_cap(c, X86_FEATURE_VNMI);
        clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
        clear_cpu_cap(c, X86_FEATURE_EPT);
        clear_cpu_cap(c, X86_FEATURE_VPID);

        rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
        msr_ctl = vmx_msr_high | vmx_msr_low;
        if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
                set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
        if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
                set_cpu_cap(c, X86_FEATURE_VNMI);
        if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
                rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
                      vmx_msr_low, vmx_msr_high);
                msr_ctl2 = vmx_msr_high | vmx_msr_low;
                if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
                    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
                        set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
                if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
                        set_cpu_cap(c, X86_FEATURE_EPT);
                if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
                        set_cpu_cap(c, X86_FEATURE_VPID);
        }
}

static void init_intel_energy_perf(struct cpuinfo_x86 *c)
{
        u64 epb;

        /*
         * Initialize MSR_IA32_ENERGY_PERF_BIAS if not already initialized.
         * (x86_energy_perf_policy(8) is available to change it at run-time.)
         */
        if (!cpu_has(c, X86_FEATURE_EPB))
                return;

        rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
        if ((epb & 0xF) != ENERGY_PERF_BIAS_PERFORMANCE)
                return;

        pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n");
        pr_warn_once("ENERGY_PERF_BIAS: View and update with x86_energy_perf_policy(8)\n");
        epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
        wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
}

static void intel_bsp_resume(struct cpuinfo_x86 *c)
{
        /*
         * MSR_IA32_ENERGY_PERF_BIAS is lost across suspend/resume,
         * so reinitialize it properly like during bootup:
         */
        init_intel_energy_perf(c);
}

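/*
 * init_cpuid_fault() - advertise X86_FEATURE_CPUID_FAULT when
 * MSR_PLATFORM_INFO reports that CPUID faulting is supported.
 */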
static void init_cpuid_fault(struct cpuinfo_x86 *c)
{
        u64 msr;

        if (!rdmsrl_safe(MSR_PLATFORM_INFO, &msr)) {
                if (msr & MSR_PLATFORM_INFO_CPUID_FAULT)
                        set_cpu_cap(c, X86_FEATURE_CPUID_FAULT);
        }
}

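/*
 * init_intel_misc_features() - reset the per-CPU MSR_MISC_FEATURES_ENABLES
 * shadow, run the individual feature probes, then program the MSR from the
 * resulting shadow value.
 */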
static void init_intel_misc_features(struct cpuinfo_x86 *c)
{
        u64 msr;

        if (rdmsrl_safe(MSR_MISC_FEATURES_ENABLES, &msr))
                return;

        /* Clear all MISC features */
        this_cpu_write(msr_misc_features_shadow, 0);

        /* Check features and update capabilities and shadow control bits */
        init_cpuid_fault(c);
        probe_xeon_phi_r3mwait(c);

        msr = this_cpu_read(msr_misc_features_shadow);
        wrmsrl(MSR_MISC_FEATURES_ENABLES, msr);
}

static void init_intel(struct cpuinfo_x86 *c)
{
        unsigned int l2 = 0;

        early_init_intel(c);

        intel_workarounds(c);

        /*
         * Detect the extended topology information if available. This
         * will reinitialise the initial_apicid which will be used
         * in init_intel_cacheinfo()
         */
        detect_extended_topology(c);

        if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
                /*
                 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
                 * detection.
                 */
                c->x86_max_cores = intel_num_cpu_cores(c);
#ifdef CONFIG_X86_32
                detect_ht(c);
#endif
        }

        l2 = init_intel_cacheinfo(c);

        /* Detect legacy cache sizes if init_intel_cacheinfo did not */
        if (l2 == 0) {
                cpu_detect_cache_sizes(c);
                l2 = c->x86_cache_size;
        }

        if (c->cpuid_level > 9) {
                unsigned eax = cpuid_eax(10);
                /* Check for version and the number of counters */
                if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
                        set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
        }

        if (cpu_has(c, X86_FEATURE_XMM2))
                set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);

        if (boot_cpu_has(X86_FEATURE_DS)) {
                unsigned int l1;
                rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
                if (!(l1 & (1<<11)))
                        set_cpu_cap(c, X86_FEATURE_BTS);
                if (!(l1 & (1<<12)))
                        set_cpu_cap(c, X86_FEATURE_PEBS);
        }

        if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_CLFLUSH) &&
            (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
                set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);

        if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_MWAIT) &&
            ((c->x86_model == INTEL_FAM6_ATOM_GOLDMONT)))
                set_cpu_bug(c, X86_BUG_MONITOR);

#ifdef CONFIG_X86_64
        if (c->x86 == 15)
                c->x86_cache_alignment = c->x86_clflush_size * 2;
        if (c->x86 == 6)
                set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else
        /*
         * Names for the Pentium II/Celeron processors
         * detectable only by also checking the cache size.
         * Dixon is NOT a Celeron.
         */
        if (c->x86 == 6) {
                char *p = NULL;

                switch (c->x86_model) {
                case 5:
                        if (l2 == 0)
                                p = "Celeron (Covington)";
                        else if (l2 == 256)
                                p = "Mobile Pentium II (Dixon)";
                        break;

                case 6:
                        if (l2 == 128)
                                p = "Celeron (Mendocino)";
                        else if (c->x86_mask == 0 || c->x86_mask == 5)
                                p = "Celeron-A";
                        break;

                case 8:
                        if (l2 == 128)
                                p = "Celeron (Coppermine)";
                        break;
                }

                if (p)
                        strcpy(c->x86_model_id, p);
        }

        if (c->x86 == 15)
                set_cpu_cap(c, X86_FEATURE_P4);
        if (c->x86 == 6)
                set_cpu_cap(c, X86_FEATURE_P3);
#endif

        /* Work around errata */
        srat_detect_node(c);

        if (cpu_has(c, X86_FEATURE_VMX))
                detect_vmx_virtcap(c);

        init_intel_energy_perf(c);

        init_intel_misc_features(c);
}

#ifdef CONFIG_X86_32
static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
        /*
         * Intel PIII Tualatin. This comes in two flavours.
         * One has 256kb of cache, the other 512. We have no way
         * to determine which, so we use a boottime override
         * for the 512kb model, and assume 256 otherwise.
         */
        if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
                size = 256;

        /*
         * Intel Quark SoC X1000 contains a 4-way set associative
         * 16K cache with a 16 byte cache line and 256 lines per tag
         */
        if ((c->x86 == 5) && (c->x86_model == 9))
                size = 16;
        return size;
}
#endif

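/*
 * CPUID leaf 2 TLB descriptor categories used by intel_tlb_table[] below.
 * The high nibble groups the TLB class (instruction, data, second data
 * array, shared), the low nibble the page size combination.
 */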
#define TLB_INST_4K     0x01
#define TLB_INST_4M     0x02
#define TLB_INST_2M_4M  0x03

#define TLB_INST_ALL    0x05
#define TLB_INST_1G     0x06

#define TLB_DATA_4K     0x11
#define TLB_DATA_4M     0x12
#define TLB_DATA_2M_4M  0x13
#define TLB_DATA_4K_4M  0x14

#define TLB_DATA_1G     0x16

#define TLB_DATA0_4K    0x21
#define TLB_DATA0_4M    0x22
#define TLB_DATA0_2M_4M 0x23

#define STLB_4K         0x41
#define STLB_4K_2M      0x42

static const struct _tlb_table intel_tlb_table[] = {
        { 0x01, TLB_INST_4K,      32, " TLB_INST 4 KByte pages, 4-way set associative" },
        { 0x02, TLB_INST_4M,       2, " TLB_INST 4 MByte pages, full associative" },
        { 0x03, TLB_DATA_4K,      64, " TLB_DATA 4 KByte pages, 4-way set associative" },
        { 0x04, TLB_DATA_4M,       8, " TLB_DATA 4 MByte pages, 4-way set associative" },
        { 0x05, TLB_DATA_4M,      32, " TLB_DATA 4 MByte pages, 4-way set associative" },
        { 0x0b, TLB_INST_4M,       4, " TLB_INST 4 MByte pages, 4-way set associative" },
        { 0x4f, TLB_INST_4K,      32, " TLB_INST 4 KByte pages */" },
        { 0x50, TLB_INST_ALL,     64, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
        { 0x51, TLB_INST_ALL,    128, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
        { 0x52, TLB_INST_ALL,    256, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
        { 0x55, TLB_INST_2M_4M,    7, " TLB_INST 2-MByte or 4-MByte pages, fully associative" },
        { 0x56, TLB_DATA0_4M,     16, " TLB_DATA0 4 MByte pages, 4-way set associative" },
        { 0x57, TLB_DATA0_4K,     16, " TLB_DATA0 4 KByte pages, 4-way associative" },
        { 0x59, TLB_DATA0_4K,     16, " TLB_DATA0 4 KByte pages, fully associative" },
        { 0x5a, TLB_DATA0_2M_4M,  32, " TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
        { 0x5b, TLB_DATA_4K_4M,   64, " TLB_DATA 4 KByte and 4 MByte pages" },
        { 0x5c, TLB_DATA_4K_4M,  128, " TLB_DATA 4 KByte and 4 MByte pages" },
        { 0x5d, TLB_DATA_4K_4M,  256, " TLB_DATA 4 KByte and 4 MByte pages" },
        { 0x61, TLB_INST_4K,      48, " TLB_INST 4 KByte pages, full associative" },
        { 0x63, TLB_DATA_1G,       4, " TLB_DATA 1 GByte pages, 4-way set associative" },
        { 0x76, TLB_INST_2M_4M,    8, " TLB_INST 2-MByte or 4-MByte pages, fully associative" },
        { 0xb0, TLB_INST_4K,     128, " TLB_INST 4 KByte pages, 4-way set associative" },
        { 0xb1, TLB_INST_2M_4M,    4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
        { 0xb2, TLB_INST_4K,      64, " TLB_INST 4KByte pages, 4-way set associative" },
        { 0xb3, TLB_DATA_4K,     128, " TLB_DATA 4 KByte pages, 4-way set associative" },
        { 0xb4, TLB_DATA_4K,     256, " TLB_DATA 4 KByte pages, 4-way associative" },
        { 0xb5, TLB_INST_4K,      64, " TLB_INST 4 KByte pages, 8-way set associative" },
        { 0xb6, TLB_INST_4K,     128, " TLB_INST 4 KByte pages, 8-way set associative" },
        { 0xba, TLB_DATA_4K,      64, " TLB_DATA 4 KByte pages, 4-way associative" },
        { 0xc0, TLB_DATA_4K_4M,    8, " TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
        { 0xc1, STLB_4K_2M,     1024, " STLB 4 KByte and 2 MByte pages, 8-way associative" },
        { 0xc2, TLB_DATA_2M_4M,   16, " DTLB 2 MByte/4MByte pages, 4-way associative" },
        { 0xca, STLB_4K,         512, " STLB 4 KByte pages, 4-way associative" },
        { 0x00, 0, 0 }
};

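/*
 * intel_tlb_lookup() - record the entry count for one CPUID leaf 2 TLB
 * descriptor, keeping the largest value seen for each page size.
 */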
static void intel_tlb_lookup(const unsigned char desc)
{
        unsigned char k;
        if (desc == 0)
                return;

        /* look up this descriptor in the table */
        for (k = 0; intel_tlb_table[k].descriptor != desc && \
                        intel_tlb_table[k].descriptor != 0; k++)
                ;

        if (intel_tlb_table[k].tlb_type == 0)
                return;

        switch (intel_tlb_table[k].tlb_type) {
        case STLB_4K:
                if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case STLB_4K_2M:
                if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_INST_ALL:
                if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_INST_4K:
                if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_INST_4M:
                if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_INST_2M_4M:
                if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_DATA_4K:
        case TLB_DATA0_4K:
                if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_DATA_4M:
        case TLB_DATA0_4M:
                if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_DATA_2M_4M:
        case TLB_DATA0_2M_4M:
                if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_DATA_4K_4M:
                if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
                if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
                break;
        case TLB_DATA_1G:
                if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
                        tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
                break;
        }
}

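/*
 * intel_detect_tlb() - walk the CPUID leaf 2 descriptor bytes and feed each
 * one to intel_tlb_lookup() to populate the global TLB size information.
 */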
static void intel_detect_tlb(struct cpuinfo_x86 *c)
{
        int i, j, n;
        unsigned int regs[4];
        unsigned char *desc = (unsigned char *)regs;

        if (c->cpuid_level < 2)
                return;

        /* Number of times to iterate */
        n = cpuid_eax(2) & 0xFF;

        for (i = 0 ; i < n ; i++) {
                cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

                /* If bit 31 is set, this is an unknown format */
                for (j = 0 ; j < 3 ; j++)
                        if (regs[j] & (1 << 31))
                                regs[j] = 0;

                /* Byte 0 is level count, not a descriptor */
                for (j = 1 ; j < 16 ; j++)
                        intel_tlb_lookup(desc[j]);
        }
}

static const struct cpu_dev intel_cpu_dev = {
        .c_vendor       = "Intel",
        .c_ident        = { "GenuineIntel" },
#ifdef CONFIG_X86_32
        .legacy_models = {
                { .family = 4, .model_names =
                  {
                          [0] = "486 DX-25/33",
                          [1] = "486 DX-50",
                          [2] = "486 SX",
                          [3] = "486 DX/2",
                          [4] = "486 SL",
                          [5] = "486 SX/2",
                          [7] = "486 DX/2-WB",
                          [8] = "486 DX/4",
                          [9] = "486 DX/4-WB"
                  }
                },
                { .family = 5, .model_names =
                  {
                          [0] = "Pentium 60/66 A-step",
                          [1] = "Pentium 60/66",
                          [2] = "Pentium 75 - 200",
                          [3] = "OverDrive PODP5V83",
                          [4] = "Pentium MMX",
                          [7] = "Mobile Pentium 75 - 200",
                          [8] = "Mobile Pentium MMX",
                          [9] = "Quark SoC X1000",
                  }
                },
                { .family = 6, .model_names =
                  {
                          [0] = "Pentium Pro A-step",
                          [1] = "Pentium Pro",
                          [3] = "Pentium II (Klamath)",
                          [4] = "Pentium II (Deschutes)",
                          [5] = "Pentium II (Deschutes)",
                          [6] = "Mobile Pentium II",
                          [7] = "Pentium III (Katmai)",
                          [8] = "Pentium III (Coppermine)",
                          [10] = "Pentium III (Cascades)",
                          [11] = "Pentium III (Tualatin)",
                  }
                },
                { .family = 15, .model_names =
                  {
                          [0] = "Pentium 4 (Unknown)",
                          [1] = "Pentium 4 (Willamette)",
                          [2] = "Pentium 4 (Northwood)",
                          [4] = "Pentium 4 (Foster)",
                          [5] = "Pentium 4 (Foster)",
                  }
                },
        },
        .legacy_cache_size = intel_size_cache,
#endif
        .c_detect_tlb   = intel_detect_tlb,
        .c_early_init   = early_init_intel,
        .c_init         = init_intel,
        .c_bsp_resume   = intel_bsp_resume,
        .c_x86_vendor   = X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);