// SPDX-License-Identifier: GPL-2.0

#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/minmax.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/topology.h>

#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/cpuid/api.h>
#include <asm/hwcap2.h>
#include <asm/intel-family.h>
#include <asm/microcode.h>
#include <asm/resctrl.h>
#include <asm/thermal.h>
#include <asm/uaccess.h>

#include "cpu.h"

/*
 * Processors which have self-snooping capability can handle conflicting
 * memory types across CPUs by snooping their own cache. However, there
 * exist CPU models in which having conflicting memory types still leads
 * to unpredictable behavior, machine check errors, or hangs. Clear this
 * feature to prevent its use on machines with known errata.
 */
static void check_memory_type_self_snoop_errata(struct cpuinfo_x86 *c)
{
        switch (c->x86_vfm) {
        case INTEL_CORE_YONAH:
        case INTEL_CORE2_MEROM:
        case INTEL_CORE2_MEROM_L:
        case INTEL_CORE2_PENRYN:
        case INTEL_CORE2_DUNNINGTON:
        case INTEL_NEHALEM_EP:
        case INTEL_NEHALEM_EX:
        case INTEL_WESTMERE_EP:
        case INTEL_SANDYBRIDGE:
                setup_clear_cpu_cap(X86_FEATURE_SELFSNOOP);
        }
}

static bool ring3mwait_disabled __read_mostly;

static int __init ring3mwait_disable(char *__unused)
{
        ring3mwait_disabled = true;
        return 1;
}
__setup("ring3mwait=disable", ring3mwait_disable);

static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
{
        /*
         * The Ring 3 MONITOR/MWAIT feature cannot be detected without
         * a CPU family and model comparison.
         */
        switch (c->x86_vfm) {
        case INTEL_XEON_PHI_KNL:
        case INTEL_XEON_PHI_KNM:
                break;
        default:
                return;
        }

        if (ring3mwait_disabled)
                return;

        set_cpu_cap(c, X86_FEATURE_RING3MWAIT);
        this_cpu_or(msr_misc_features_shadow,
                    1UL << MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT);

        if (c == &boot_cpu_data)
                ELF_HWCAP2 |= HWCAP2_RING3MWAIT;
}

/*
 * Early microcode releases for the Spectre v2 mitigation were broken.
 * Information taken from:
 * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
 * - https://kb.vmware.com/s/article/52345
 * - Microcode revisions observed in the wild
 * - Release note from 20180108 microcode release
 */
struct sku_microcode {
        u32     vfm;
        u8      stepping;
        u32     microcode;
};
static const struct sku_microcode spectre_bad_microcodes[] = {
        { INTEL_KABYLAKE,       0x0B,   0x80 },
        { INTEL_KABYLAKE,       0x0A,   0x80 },
        { INTEL_KABYLAKE,       0x09,   0x80 },
        { INTEL_KABYLAKE_L,     0x0A,   0x80 },
        { INTEL_KABYLAKE_L,     0x09,   0x80 },
        { INTEL_SKYLAKE_X,      0x03,   0x0100013e },
        { INTEL_SKYLAKE_X,      0x04,   0x0200003c },
        { INTEL_BROADWELL,      0x04,   0x28 },
        { INTEL_BROADWELL_G,    0x01,   0x1b },
        { INTEL_BROADWELL_D,    0x02,   0x14 },
        { INTEL_BROADWELL_D,    0x03,   0x07000011 },
        { INTEL_BROADWELL_X,    0x01,   0x0b000025 },
        { INTEL_HASWELL_L,      0x01,   0x21 },
        { INTEL_HASWELL_G,      0x01,   0x18 },
        { INTEL_HASWELL,        0x03,   0x23 },
        { INTEL_HASWELL_X,      0x02,   0x3b },
        { INTEL_HASWELL_X,      0x04,   0x10 },
        { INTEL_IVYBRIDGE_X,    0x04,   0x42a },
        /* Observed in the wild */
        { INTEL_SANDYBRIDGE_X,  0x06,   0x61b },
        { INTEL_SANDYBRIDGE_X,  0x07,   0x712 },
};
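
/*
 * Return true if this CPU runs one of the known-bad Spectre v2 microcode
 * revisions listed above (or an older one) for its model and stepping.
 */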
static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
{
        int i;

        /*
         * We know that hypervisors lie to us about the microcode version,
         * so we may as well hope that they are running the correct version.
         */
        if (cpu_has(c, X86_FEATURE_HYPERVISOR))
                return false;

        for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
                if (c->x86_vfm == spectre_bad_microcodes[i].vfm &&
                    c->x86_stepping == spectre_bad_microcodes[i].stepping)
                        return (c->microcode <= spectre_bad_microcodes[i].microcode);
        }
        return false;
}

#define MSR_IA32_TME_ACTIVATE           0x982

/* Helpers to access TME_ACTIVATE MSR */
#define TME_ACTIVATE_LOCKED(x)          (x & 0x1)
#define TME_ACTIVATE_ENABLED(x)         (x & 0x2)

#define TME_ACTIVATE_KEYID_BITS(x)      ((x >> 32) & 0xf)       /* Bits 35:32 */
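
/*
 * Detect whether BIOS has enabled and locked TME/MKTME. Reserved KeyID bits
 * reduce the number of usable physical address bits.
 */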
static void detect_tme_early(struct cpuinfo_x86 *c)
{
        u64 tme_activate;
        int keyid_bits;

        rdmsrq(MSR_IA32_TME_ACTIVATE, tme_activate);

        if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
                pr_info_once("x86/tme: not enabled by BIOS\n");
                clear_cpu_cap(c, X86_FEATURE_TME);
                return;
        }
        pr_info_once("x86/tme: enabled by BIOS\n");

        keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
        if (!keyid_bits)
                return;

        /*
         * KeyID bits are set by BIOS and can be present regardless
         * of whether the kernel is using them. They effectively lower
         * the number of physical address bits.
         *
         * Update cpuinfo_x86::x86_phys_bits accordingly.
         */
        c->x86_phys_bits -= keyid_bits;
        pr_info_once("x86/mktme: BIOS enabled: x86_phys_bits reduced by %d\n",
                     keyid_bits);
}
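
/*
 * Called from the common CPU identification path so that CPUID-based
 * feature detection sees the full range of leaves.
 */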
void intel_unlock_cpuid_leafs(struct cpuinfo_x86 *c)
{
        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
                return;

        if (c->x86_vfm < INTEL_PENTIUM_M_DOTHAN)
                return;

        /*
         * The BIOS can have limited CPUID to leaf 2, which breaks feature
         * enumeration. Unlock it and update the maximum leaf info.
         */
        if (msr_clear_bit(MSR_IA32_MISC_ENABLE, MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0)
                c->cpuid_level = cpuid_eax(0);
}
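
/*
 * Early per-CPU setup: runs during early identification of the boot CPU and
 * is also invoked from init_intel() for every CPU.
 */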
static void early_init_intel(struct cpuinfo_x86 *c)
{
        u64 misc_enable;

        if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
                c->microcode = intel_get_microcode_revision();

        /*
         * If any of the speculation control features are set, check the
         * microcode blacklist and, on a match, clear the lot.
         */
        if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) ||
             cpu_has(c, X86_FEATURE_INTEL_STIBP) ||
             cpu_has(c, X86_FEATURE_IBRS) || cpu_has(c, X86_FEATURE_IBPB) ||
             cpu_has(c, X86_FEATURE_STIBP)) && bad_spectre_microcode(c)) {
                pr_warn("Intel Spectre v2 broken microcode detected; disabling Speculation Control\n");
                setup_clear_cpu_cap(X86_FEATURE_IBRS);
                setup_clear_cpu_cap(X86_FEATURE_IBPB);
                setup_clear_cpu_cap(X86_FEATURE_STIBP);
                setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL);
                setup_clear_cpu_cap(X86_FEATURE_MSR_SPEC_CTRL);
                setup_clear_cpu_cap(X86_FEATURE_INTEL_STIBP);
                setup_clear_cpu_cap(X86_FEATURE_SSBD);
                setup_clear_cpu_cap(X86_FEATURE_SPEC_CTRL_SSBD);
        }

        /*
         * Atom erratum AAE44/AAF40/AAG38/AAH41:
         *
         * A race condition between speculative fetches and invalidating
         * a large page. This is worked around in microcode, but we
         * need the microcode to have already been loaded... so if it is
         * not, recommend a BIOS update and disable large pages.
         */
        if (c->x86_vfm == INTEL_ATOM_BONNELL && c->x86_stepping <= 2 &&
            c->microcode < 0x20e) {
                pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
                clear_cpu_cap(c, X86_FEATURE_PSE);
        }

#ifdef CONFIG_X86_64
        set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
        /* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
        if (c->x86 == 15 && c->x86_cache_alignment == 64)
                c->x86_cache_alignment = 128;
#endif

        /* CPUID workaround for 0F33/0F34 CPU */
        if (c->x86_vfm == INTEL_P4_PRESCOTT &&
            (c->x86_stepping == 0x3 || c->x86_stepping == 0x4))
                c->x86_phys_bits = 36;

        /*
         * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
         * with P/T states and does not stop in deep C-states.
         *
         * It is also reliable across cores and sockets. (but not across
         * cabinets - we turn it off in that case explicitly.)
         *
         * Use a model-specific check for some older CPUs that have invariant
         * TSC but may not report it architecturally via 8000_0007.
         */
        if (c->x86_power & (1 << 8)) {
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
                set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
        } else if ((c->x86_vfm >= INTEL_P4_PRESCOTT && c->x86_vfm <= INTEL_P4_CEDARMILL) ||
                   (c->x86_vfm >= INTEL_CORE_YONAH && c->x86_vfm <= INTEL_IVYBRIDGE)) {
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
        }

        /* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
        switch (c->x86_vfm) {
        case INTEL_ATOM_SALTWELL_MID:
        case INTEL_ATOM_SALTWELL_TABLET:
        case INTEL_ATOM_SILVERMONT_MID:
        case INTEL_ATOM_AIRMONT_NP:
                set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
                break;
        }

        /*
         * PAT is broken on early family 6 CPUs, the last of which
         * is "Yonah" where the erratum is named "AN7":
         *
         *      Page with PAT (Page Attribute Table) Set to USWC
         *      (Uncacheable Speculative Write Combine) While
         *      Associated MTRR (Memory Type Range Register) Is UC
         *      (Uncacheable) May Consolidate to UC
         *
         * Disable PAT and fall back to MTRR on these CPUs.
         */
        if (c->x86_vfm >= INTEL_PENTIUM_PRO &&
            c->x86_vfm <= INTEL_CORE_YONAH)
                clear_cpu_cap(c, X86_FEATURE_PAT);

        /*
         * Modern CPUs are generally expected to have a sane fast string
         * implementation. However, BIOSes typically have a knob to tweak
         * the architectural MISC_ENABLE.FAST_STRING enable bit.
         *
         * Adhere to the preference and program the Linux-defined fast
         * string flag and enhanced fast string capabilities accordingly.
         */
        if (c->x86_vfm >= INTEL_PENTIUM_M_DOTHAN) {
                rdmsrq(MSR_IA32_MISC_ENABLE, misc_enable);
                if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
                        /* X86_FEATURE_ERMS is set based on CPUID */
                        set_cpu_cap(c, X86_FEATURE_REP_GOOD);
                } else {
                        pr_info("Disabled fast string operations\n");
                        setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
                        setup_clear_cpu_cap(X86_FEATURE_ERMS);
                }
        }

        /*
         * Intel Quark Core DevMan_001.pdf section 6.4.11
         * "The operating system also is required to invalidate (i.e., flush)
         *  the TLB when any changes are made to any of the page table entries.
         *  The operating system must reload CR3 to cause the TLB to be flushed"
         *
         * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h
         * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
         * to be modified.
         */
        if (c->x86_vfm == INTEL_QUARK_X1000) {
                pr_info("Disabling PGE capability bit\n");
                setup_clear_cpu_cap(X86_FEATURE_PGE);
        }

        check_memory_type_self_snoop_errata(c);

        /*
         * Adjust the number of physical bits early because it affects the
         * valid bits of the MTRR mask registers.
         */
        if (cpu_has(c, X86_FEATURE_TME))
                detect_tme_early(c);
}
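
/* One-time setup that only needs to run on the boot CPU. */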
static void bsp_init_intel(struct cpuinfo_x86 *c)
{
        resctrl_cpu_detect(c);
}

#ifdef CONFIG_X86_32
/*
 * Early probe support logic for ppro memory erratum #50.
 * This is called before we do CPU identification work.
 */
int ppro_with_ram_bug(void)
{
        /* Uses data from early_cpu_detect now */
        if (boot_cpu_data.x86_vfm == INTEL_PENTIUM_PRO &&
            boot_cpu_data.x86_stepping < 8) {
                pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
                return 1;
        }
        return 0;
}

static void intel_smp_check(struct cpuinfo_x86 *c)
{
        /* Only relevant when called from identify_secondary_cpu() */
        if (!c->cpu_index)
                return;

        /*
         * Mask B, Pentium, but not Pentium MMX
         */
        if (c->x86_vfm >= INTEL_FAM5_START && c->x86_vfm < INTEL_PENTIUM_MMX &&
            c->x86_stepping >= 1 && c->x86_stepping <= 4) {
                /*
                 * Remember we have B step Pentia with bugs
                 */
                WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
                             "with B stepping processors.\n");
        }
}

static int forcepae;
static int __init forcepae_setup(char *__unused)
{
        forcepae = 1;
        return 1;
}
__setup("forcepae", forcepae_setup);

static void intel_workarounds(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_F00F_BUG
        /*
         * All models of Pentium and Pentium with MMX technology CPUs
         * have the F0 0F bug, which lets nonprivileged users lock up the
         * system. Announce that the fault handler will be checking for it.
         * The Quark is also family 5, but does not have the same bug.
         */
        clear_cpu_bug(c, X86_BUG_F00F);
        if (c->x86_vfm >= INTEL_FAM5_START && c->x86_vfm < INTEL_QUARK_X1000) {
                static int f00f_workaround_enabled;

                set_cpu_bug(c, X86_BUG_F00F);
                if (!f00f_workaround_enabled) {
                        pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n");
                        f00f_workaround_enabled = 1;
                }
        }
#endif

        /*
         * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
         * model 3 mask 3
         */
        if ((c->x86_vfm == INTEL_PENTIUM_II_KLAMATH && c->x86_stepping < 3) ||
            c->x86_vfm < INTEL_PENTIUM_II_KLAMATH)
                clear_cpu_cap(c, X86_FEATURE_SEP);

        /*
         * PAE CPUID issue: many Pentium M report no PAE but may have a
         * functionally usable PAE implementation.
         * Forcefully enable PAE if kernel parameter "forcepae" is present.
         */
        if (forcepae) {
                pr_warn("PAE forced!\n");
                set_cpu_cap(c, X86_FEATURE_PAE);
                add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
        }

        /*
         * P4 Xeon erratum 037 workaround.
         * Hardware prefetcher may cause stale data to be loaded into the cache.
         */
        if (c->x86_vfm == INTEL_P4_WILLAMETTE && c->x86_stepping == 1) {
                if (msr_set_bit(MSR_IA32_MISC_ENABLE,
                                MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
                        pr_info("CPU: C0 stepping P4 Xeon detected.\n");
                        pr_info("CPU: Disabling hardware prefetching (Erratum 037)\n");
                }
        }

        /*
         * See if we have a good local APIC by checking for buggy Pentia,
         * i.e. all B steppings and the C2 stepping of P54C when using their
         * integrated APIC (see 11AP erratum in "Pentium Processor
         * Specification Update").
         */
        if (boot_cpu_has(X86_FEATURE_APIC) && c->x86_vfm == INTEL_PENTIUM_75 &&
            (c->x86_stepping < 0x6 || c->x86_stepping == 0xb))
                set_cpu_bug(c, X86_BUG_11AP);

#ifdef CONFIG_X86_INTEL_USERCOPY
        /*
         * MOVSL bulk memory moves can be slow when source and dest are not
         * both 8-byte aligned. PII/PIII only like MOVSL with 8-byte alignment.
         *
         * Set the preferred alignment for Pentium Pro and newer processors, as
         * it has only been tested on these.
         */
        if (c->x86_vfm >= INTEL_PENTIUM_PRO)
                movsl_mask.mask = 7;
#endif

        intel_smp_check(c);
}
#else
static void intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif
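
/*
 * Bind this CPU to the NUMA node reported by ACPI/SRAT, falling back to the
 * node already recorded by init_cpu_to_node() when that is unavailable.
 */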
static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
        unsigned node;
        int cpu = smp_processor_id();

        /*
         * Don't do the funky fallback heuristics the AMD version employs
         * for now.
         */
        node = numa_cpu_node(cpu);
        if (node == NUMA_NO_NODE || !node_online(node)) {
                /* reuse the value from init_cpu_to_node() */
                node = cpu_to_node(cpu);
        }
        numa_set_node(cpu, node);
#endif
}
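
/*
 * Detect support for CPUID faulting, which lets the kernel trap CPUID
 * executed in userspace (used by arch_prctl(ARCH_SET_CPUID)).
 */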
static void init_cpuid_fault(struct cpuinfo_x86 *c)
{
        u64 msr;

        if (!rdmsrq_safe(MSR_PLATFORM_INFO, &msr)) {
                if (msr & MSR_PLATFORM_INFO_CPUID_FAULT)
                        set_cpu_cap(c, X86_FEATURE_CPUID_FAULT);
        }
}

static void init_intel_misc_features(struct cpuinfo_x86 *c)
{
        u64 msr;

        if (rdmsrq_safe(MSR_MISC_FEATURES_ENABLES, &msr))
                return;

        /* Clear all MISC features */
        this_cpu_write(msr_misc_features_shadow, 0);

        /* Check features and update capabilities and shadow control bits */
        init_cpuid_fault(c);
        probe_xeon_phi_r3mwait(c);

        msr = this_cpu_read(msr_misc_features_shadow);
        wrmsrq(MSR_MISC_FEATURES_ENABLES, msr);
}

/*
 * This is a list of Intel CPUs that are known to suffer from downclocking when
 * ZMM registers (512-bit vectors) are used. On these CPUs, when the kernel
 * executes SIMD-optimized code such as cryptography functions or CRCs, it
 * should prefer 256-bit (YMM) code to 512-bit (ZMM) code.
 */
static const struct x86_cpu_id zmm_exclusion_list[] = {
        X86_MATCH_VFM(INTEL_SKYLAKE_X,          0),
        X86_MATCH_VFM(INTEL_ICELAKE_X,          0),
        X86_MATCH_VFM(INTEL_ICELAKE_D,          0),
        X86_MATCH_VFM(INTEL_ICELAKE,            0),
        X86_MATCH_VFM(INTEL_ICELAKE_L,          0),
        X86_MATCH_VFM(INTEL_ICELAKE_NNPI,       0),
        X86_MATCH_VFM(INTEL_TIGERLAKE_L,        0),
        X86_MATCH_VFM(INTEL_TIGERLAKE,          0),
        /* Allow Rocket Lake and later, and Sapphire Rapids and later. */
        {},
};

static void init_intel(struct cpuinfo_x86 *c)
{
        early_init_intel(c);

        intel_workarounds(c);

        init_intel_cacheinfo(c);

        if (c->cpuid_level > 9) {
                unsigned eax = cpuid_eax(10);
                /* Check for version and the number of counters */
                if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
                        set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
        }

        if (cpu_has(c, X86_FEATURE_XMM2))
                set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);

        if (boot_cpu_has(X86_FEATURE_DS)) {
                unsigned int l1, l2;

                rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
                if (!(l1 & MSR_IA32_MISC_ENABLE_BTS_UNAVAIL))
                        set_cpu_cap(c, X86_FEATURE_BTS);
                if (!(l1 & MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL))
                        set_cpu_cap(c, X86_FEATURE_PEBS);
        }

        if (boot_cpu_has(X86_FEATURE_CLFLUSH) &&
            (c->x86_vfm == INTEL_CORE2_DUNNINGTON ||
             c->x86_vfm == INTEL_NEHALEM_EX ||
             c->x86_vfm == INTEL_WESTMERE_EX))
                set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);

        if (boot_cpu_has(X86_FEATURE_MWAIT) &&
            (c->x86_vfm == INTEL_ATOM_GOLDMONT ||
             c->x86_vfm == INTEL_LUNARLAKE_M))
                set_cpu_bug(c, X86_BUG_MONITOR);

        if (c->x86 == 15)
                c->x86_cache_alignment = c->x86_clflush_size * 2;

        /*
         * Names for the Pentium II/Celeron processors
         * detectable only by also checking the cache size.
         * Dixon is NOT a Celeron.
         */
        if (c->x86 == 6) {
                unsigned int l2 = c->x86_cache_size;
                char *p = NULL;

                switch (c->x86_model) {
                case 5:
                        if (l2 == 0)
                                p = "Celeron (Covington)";
                        else if (l2 == 256)
                                p = "Mobile Pentium II (Dixon)";
                        break;
                case 6:
                        if (l2 == 128)
                                p = "Celeron (Mendocino)";
                        else if (c->x86_stepping == 0 || c->x86_stepping == 5)
                                p = "Celeron-A";
                        break;
                case 8:
                        if (l2 == 128)
                                p = "Celeron (Coppermine)";
                        break;
                }

                if (p)
                        strcpy(c->x86_model_id, p);
        }

        if (x86_match_cpu(zmm_exclusion_list))
                set_cpu_cap(c, X86_FEATURE_PREFER_YMM);

        /* Work around errata */
        srat_detect_node(c);

        init_ia32_feat_ctl(c);

        init_intel_misc_features(c);

        intel_init_thermal(c);
}

#ifdef CONFIG_X86_32
static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
        /*
         * Intel PIII Tualatin. This comes in two flavours.
         * One has 256kb of cache, the other 512. We have no way
         * to determine which, so we use a boottime override
         * for the 512kb model, and assume 256 otherwise.
         */
        if (c->x86_vfm == INTEL_PENTIUM_III_TUALATIN && size == 0)
                size = 256;

        /*
         * Intel Quark SoC X1000 contains a 4-way set associative
         * 16K cache with a 16 byte cache line and 256 lines per tag
         */
        if (c->x86_vfm == INTEL_QUARK_X1000)
                return 16;

        return size;
}
#endif
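
/*
 * Record the TLB sizes described by one CPUID leaf 0x2 descriptor, keeping
 * the largest value seen for each page size in the global TLB counters.
 */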
static void intel_tlb_lookup(const struct leaf_0x2_table *desc)
{
        short entries = desc->entries;

        switch (desc->t_type) {
        case STLB_4K:
                tlb_lli_4k = max(tlb_lli_4k, entries);
                tlb_lld_4k = max(tlb_lld_4k, entries);
                break;
        case STLB_4K_2M:
                tlb_lli_4k = max(tlb_lli_4k, entries);
                tlb_lld_4k = max(tlb_lld_4k, entries);
                tlb_lli_2m = max(tlb_lli_2m, entries);
                tlb_lld_2m = max(tlb_lld_2m, entries);
                tlb_lli_4m = max(tlb_lli_4m, entries);
                tlb_lld_4m = max(tlb_lld_4m, entries);
                break;
        case TLB_INST_ALL:
                tlb_lli_4k = max(tlb_lli_4k, entries);
                tlb_lli_2m = max(tlb_lli_2m, entries);
                tlb_lli_4m = max(tlb_lli_4m, entries);
                break;
        case TLB_INST_4K:
                tlb_lli_4k = max(tlb_lli_4k, entries);
                break;
        case TLB_INST_4M:
                tlb_lli_4m = max(tlb_lli_4m, entries);
                break;
        case TLB_INST_2M_4M:
                tlb_lli_2m = max(tlb_lli_2m, entries);
                tlb_lli_4m = max(tlb_lli_4m, entries);
                break;
        case TLB_DATA_4K:
        case TLB_DATA0_4K:
                tlb_lld_4k = max(tlb_lld_4k, entries);
                break;
        case TLB_DATA_4M:
        case TLB_DATA0_4M:
                tlb_lld_4m = max(tlb_lld_4m, entries);
                break;
        case TLB_DATA_2M_4M:
        case TLB_DATA0_2M_4M:
                tlb_lld_2m = max(tlb_lld_2m, entries);
                tlb_lld_4m = max(tlb_lld_4m, entries);
                break;
        case TLB_DATA_4K_4M:
                tlb_lld_4k = max(tlb_lld_4k, entries);
                tlb_lld_4m = max(tlb_lld_4m, entries);
                break;
        case TLB_DATA_1G_2M_4M:
                tlb_lld_2m = max(tlb_lld_2m, TLB_0x63_2M_4M_ENTRIES);
                tlb_lld_4m = max(tlb_lld_4m, TLB_0x63_2M_4M_ENTRIES);
                fallthrough;
        case TLB_DATA_1G:
                tlb_lld_1g = max(tlb_lld_1g, entries);
                break;
        }
}
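
/*
 * Walk all CPUID leaf 0x2 descriptors for this CPU and feed each one to
 * intel_tlb_lookup().
 */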
static void intel_detect_tlb(struct cpuinfo_x86 *c)
{
        const struct leaf_0x2_table *desc;
        union leaf_0x2_regs regs;
        u8 *ptr;

        if (c->cpuid_level < 2)
                return;

        cpuid_leaf_0x2(&regs);
        for_each_cpuid_0x2_desc(regs, ptr, desc)
                intel_tlb_lookup(desc);
}

static const struct cpu_dev intel_cpu_dev = {
        .c_ident        = { "GenuineIntel" },
#ifdef CONFIG_X86_32
        .legacy_models = {
                { .family = 4, .model_names =
                  {
                          [0] = "486 DX-25/33",
                  }
                },
                { .family = 5, .model_names =
                  {
                          [0] = "Pentium 60/66 A-step",
                          [1] = "Pentium 60/66",
                          [2] = "Pentium 75 - 200",
                          [3] = "OverDrive PODP5V83",
                          [7] = "Mobile Pentium 75 - 200",
                          [8] = "Mobile Pentium MMX",
                          [9] = "Quark SoC X1000",
                  }
                },
                { .family = 6, .model_names =
                  {
                          [0] = "Pentium Pro A-step",
                          [3] = "Pentium II (Klamath)",
                          [4] = "Pentium II (Deschutes)",
                          [5] = "Pentium II (Deschutes)",
                          [6] = "Mobile Pentium II",
                          [7] = "Pentium III (Katmai)",
                          [8] = "Pentium III (Coppermine)",
                          [10] = "Pentium III (Cascades)",
                          [11] = "Pentium III (Tualatin)",
                  }
                },
                { .family = 15, .model_names =
                  {
                          [0] = "Pentium 4 (Unknown)",
                          [1] = "Pentium 4 (Willamette)",
                          [2] = "Pentium 4 (Northwood)",
                          [4] = "Pentium 4 (Foster)",
                          [5] = "Pentium 4 (Foster)",
                  }
                },
        },
        .legacy_cache_size = intel_size_cache,
#endif
        .c_detect_tlb   = intel_detect_tlb,
        .c_early_init   = early_init_intel,
        .c_bsp_init     = bsp_init_intel,
        .c_init         = init_intel,
        .c_x86_vendor   = X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);