#include <linux/init.h>
#include <linux/kernel.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/module.h>
#include <linux/uaccess.h>

#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/msr.h>
#include <asm/bugs.h>
#include <asm/cpu.h>

#ifdef CONFIG_X86_64
#include <linux/topology.h>
#include <asm/numa_64.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#endif

static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
	/* Unmask CPUID levels if masked: */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		u64 misc_enable;

		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

		if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
			misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
			c->cpuid_level = cpuid_eax(0);
		}
	}
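
	/*
	 * Note: per the Intel SDM, the "Limit CPUID Maxval" bit in
	 * IA32_MISC_ENABLE caps CPUID.0 EAX at 3 when set by firmware.
	 * Clearing it above re-exposes the full CPUID range, which is
	 * why cpuid_level must be re-read afterwards.
	 */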

	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	/*
	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
	 *
	 * A race condition between speculative fetches and invalidating
	 * a large page.  This is worked around in microcode, but we
	 * need the microcode to have already been loaded... so if it is
	 * not, recommend a BIOS update and disable large pages.
	 */
	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2) {
		u32 ucode, junk;

		wrmsr(MSR_IA32_UCODE_REV, 0, 0);
		sync_core();
		rdmsr(MSR_IA32_UCODE_REV, junk, ucode);

		if (ucode < 0x20e) {
			printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n");
			clear_cpu_cap(c, X86_FEATURE_PSE);
		}
	}
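
	/*
	 * The sequence above follows the SDM-documented protocol for
	 * querying the loaded microcode revision: write 0 to
	 * MSR_IA32_UCODE_REV (IA32_BIOS_SIGN_ID), execute a serializing
	 * instruction such as CPUID (sync_core()), then read the revision
	 * back from the MSR's high half.
	 */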

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif

	/* CPUID workaround for 0F33/0F34 CPU */
	if (c->x86 == 0xF && c->x86_model == 0x3
	    && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
		c->x86_phys_bits = 36;
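	/*
	 * (0F33h/0F34h is the CPUID signature of family 0xF, model 3,
	 * steppings 3-4; those parts appear to report a wider physical
	 * address width than is actually usable, so clamp it to 36 bits.)
	 */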

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states.
	 *
	 * It is also reliable across cores and sockets. (but not across
	 * cabinets - we turn it off in that case explicitly.)
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
		if (!check_tsc_unstable())
			sched_clock_stable = 1;
	}

	/*
	 * There is a known erratum on Pentium III and Core Solo
	 * and Core Duo CPUs:
	 * " Page with PAT set to WC while associated MTRR is UC
	 *   may consolidate to UC "
	 * Because of this erratum, it is better to stick with
	 * setting WC in MTRR rather than using PAT on these CPUs.
	 *
	 * Enable PAT WC only on P4, Core 2 or later CPUs.
	 */
	if (c->x86 == 6 && c->x86_model < 15)
		clear_cpu_cap(c, X86_FEATURE_PAT);

#ifdef CONFIG_KMEMCHECK
	/*
	 * P4s have a "fast strings" feature which causes single-
	 * stepping REP instructions to only generate a #DB on
	 * cache-line boundaries.
	 *
	 * Ingo Molnar reported a Pentium D (model 6) and a Xeon
	 * (model 2) with the same problem.
	 */
	if (c->x86 == 15) {
		u64 misc_enable;

		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

		if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
			printk(KERN_INFO "kmemcheck: Disabling fast string operations\n");

			misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		}
	}
#endif
}

#ifdef CONFIG_X86_32
/*
 *	Early probe support logic for ppro memory erratum #50
 *
 *	This is called before we do CPU ident work
 */

int __cpuinit ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask < 8) {
		printk(KERN_INFO "Pentium Pro with Errata#50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}
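
/*
 * F0 0F workaround used below: the IDT is remapped through a read-only
 * fixmap entry, so the lockup-prone exception delivery described in the
 * erratum turns into a page fault instead.  The page-fault handler
 * recognises faults on this address range and delivers the expected
 * trap, so a malicious "f0 0f c7 c8" sequence can no longer wedge the
 * CPU.
 */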
#ifdef CONFIG_X86_F00F_BUG
static void __cpuinit trap_init_f00f_bug(void)
{
	__set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

	/*
	 * Update the IDT descriptor and reload the IDT so that
	 * it uses the read-only mapped virtual address.
	 */
	idt_descr.address = fix_to_virt(FIX_F00F_IDT);
	load_idt(&idt_descr);
}
#endif

static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	/* Is this call from identify_secondary_cpu()? */
	if (c->cpu_index == boot_cpu_id)
		return;

	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86 == 5 &&
	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
	    c->x86_model <= 3) {
		/*
		 * Remember we have B step Pentia with bugs
		 */
		WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
			     "with B stepping processors.\n");
	}
#endif
}

static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All current models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system. Note that the workaround should only be initialized
	 * once...
	 */
	c->f00f_bug = 0;
	if (!paravirt_enabled() && c->x86 == 5) {
		static int f00f_workaround_enabled;

		c->f00f_bug = 1;
		if (!f00f_workaround_enabled) {
			trap_init_f00f_bug();
			printk(KERN_NOTICE "Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3 mask 3
	 */
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);
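	/*
	 * The packed value is (family << 8 | model << 4 | stepping), so
	 * 0x633 is family 6, model 3, stepping 3: the first silicon with
	 * a working SYSENTER/SYSEXIT implementation.  Anything older must
	 * not advertise SEP.
	 */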

	/*
	 * P4 Xeon erratum 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 */
	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
		rdmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		if ((lo & MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE) == 0) {
			printk(KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
			printk(KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
			lo |= MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE;
			wrmsr(MSR_IA32_MISC_ENABLE, lo, hi);
		}
	}
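
	/*
	 * MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE is bit 9 of
	 * IA32_MISC_ENABLE; setting it turns off the hardware prefetcher
	 * on these early Pentium 4 parts.
	 */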

	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update").
	 */
	if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
	    (c->x86_mask < 0x6 || c->x86_mask == 0xb))
		set_cpu_cap(c, X86_FEATURE_11AP);
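	/*
	 * Here 0x520 is (family << 8 | model << 4) for family 5, model 2,
	 * i.e. the P54C core; steppings below 6 are the B steps and 0xb
	 * is the C2 step named in the erratum.
	 */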

#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

#ifdef CONFIG_X86_NUMAQ
	numaq_tsc_disable();
#endif

	intel_smp_check(c);
}
#else
static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif

static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
{
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
	unsigned node;
	int cpu = smp_processor_id();
	int apicid = cpu_has_apic ? hard_smp_processor_id() : c->apicid;

	/*
	 * Don't do the funky fallback heuristics the AMD version employs
	 * for now.
	 */
	node = apicid_to_node[apicid];
	if (node == NUMA_NO_NODE)
		node = first_node(node_online_map);
	else if (!node_online(node)) {
		/* reuse the value from init_cpu_to_node() */
		node = cpu_to_node(cpu);
	}
	numa_set_node(cpu, node);
#endif
}

/*
 * Find out the number of processor cores on the die.
 */
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, ebx, ecx, edx;

	if (c->cpuid_level < 4)
		return 1;

	/* Intel has a non-standard dependency on %ecx for this CPUID level. */
	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
	if (eax & 0x1f)
		return (eax >> 26) + 1;

	return 1;
}
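
/*
 * CPUID.(EAX=4, ECX=0) decode, per the Intel SDM: EAX[4:0] is the cache
 * type of the first cache level (zero means the leaf is not populated),
 * and EAX[31:26] is the maximum number of addressable processor cores
 * in the package, minus one.  Hence the decode above.
 */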

static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c)
{
	/* Intel VMX MSR indicated features */
#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW	0x00200000
#define X86_VMX_FEATURE_PROC_CTLS_VNMI		0x00400000
#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS	0x80000000
#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC	0x00000001
#define X86_VMX_FEATURE_PROC_CTLS2_EPT		0x00000002
#define X86_VMX_FEATURE_PROC_CTLS2_VPID		0x00000020

	u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;

	clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	clear_cpu_cap(c, X86_FEATURE_VNMI);
	clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
	clear_cpu_cap(c, X86_FEATURE_EPT);
	clear_cpu_cap(c, X86_FEATURE_VPID);
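
	/*
	 * Per the Intel SDM, each VMX capability MSR reports the allowed
	 * 0-settings of the corresponding VM-execution control field in
	 * its low 32 bits and the allowed 1-settings in its high 32 bits;
	 * OR-ing the two halves yields the set of controls this CPU can
	 * enable, which is what the tests below check.
	 */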
	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
	msr_ctl = vmx_msr_high | vmx_msr_low;
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
		set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
		set_cpu_cap(c, X86_FEATURE_VNMI);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
		      vmx_msr_low, vmx_msr_high);
		msr_ctl2 = vmx_msr_high | vmx_msr_low;
		if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
		    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
			set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
			set_cpu_cap(c, X86_FEATURE_EPT);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
			set_cpu_cap(c, X86_FEATURE_VPID);
	}
}

static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
	unsigned int l2 = 0;

	early_init_intel(c);

	intel_workarounds(c);

	/*
	 * Detect the extended topology information if available. This
	 * will reinitialise the initial_apicid which will be used
	 * in init_intel_cacheinfo()
	 */
	detect_extended_topology(c);

	l2 = init_intel_cacheinfo(c);
	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);
		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}
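
	/*
	 * CPUID leaf 0xA reports the architectural-perfmon version in
	 * EAX[7:0] and the number of general-purpose counters per logical
	 * CPU in EAX[15:8]; the feature is only claimed above when the
	 * version is non-zero and more than one counter is present.
	 */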

	if (cpu_has_xmm2)
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);

	if (cpu_has_ds) {
		unsigned int l1;

		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1<<11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & (1<<12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}
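
	/*
	 * In IA32_MISC_ENABLE, bit 11 is "BTS unavailable" and bit 12 is
	 * "PEBS unavailable", so a clear bit above means the corresponding
	 * debug-store feature is usable.
	 */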

	if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
		set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);

#ifdef CONFIG_X86_64
	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else
	/*
	 * Names for the Pentium II/Celeron processors
	 * detectable only by also checking the cache size.
	 * Dixon is NOT a Celeron.
	 */
	if (c->x86 == 6) {
		char *p = NULL;

		switch (c->x86_model) {
		case 5:
			if (c->x86_mask == 0) {
				if (l2 == 0)
					p = "Celeron (Covington)";
				else if (l2 == 256)
					p = "Mobile Pentium II (Dixon)";
			}
			break;
		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_mask == 0 || c->x86_mask == 5)
				p = "Celeron-A";
			break;
		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}

		if (p)
			strcpy(c->x86_model_id, p);
	}

	if (c->x86 == 15)
		set_cpu_cap(c, X86_FEATURE_P4);
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_P3);
#endif

	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
		/*
		 * Let's use the legacy cpuid leaves 0x1 and 0x4 for
		 * topology detection.
		 */
		c->x86_max_cores = intel_num_cpu_cores(c);
#ifdef CONFIG_X86_32
		detect_ht(c);
#endif
	}

	/* Work around errata */
	srat_detect_node(c);

	if (cpu_has(c, X86_FEATURE_VMX))
		detect_vmx_virtcap(c);
}

#ifdef CONFIG_X86_32
static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kb of cache, the other 512. We have no way
	 * to determine which, so we use a boot-time override
	 * for the 512kb model, and assume 256 otherwise.
	 */
	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
		size = 256;

	return size;
}
#endif
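
/*
 * The boot-time override mentioned above is the "cachesize=" kernel
 * parameter, which lets the owner of a 512 KB Tualatin state the
 * correct size explicitly.
 */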

static const struct cpu_dev __cpuinitconst intel_cpu_dev = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
#ifdef CONFIG_X86_32
	.c_models = {
		{ .vendor = X86_VENDOR_INTEL, .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX"
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .vendor = X86_VENDOR_INTEL, .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.c_size_cache	= intel_size_cache,
#endif
	.c_early_init	= early_init_intel,
	.c_init		= init_intel,
	.c_x86_vendor	= X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);