// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>

#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/thread_info.h>
#include <linux/init.h>
#include <linux/uaccess.h>

#include <asm/cpufeature.h>
#include <asm/pgtable.h>
#include <asm/msr.h>
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/intel-family.h>
#include <asm/microcode_intel.h>
#include <asm/hwcap2.h>
#include <asm/elf.h>

#ifdef CONFIG_X86_64
#include <linux/topology.h>
#endif

#include "cpu.h"

#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/mpspec.h>
#include <asm/apic.h>
#endif

/*
 * Just in case our CPU detection goes bad, or you have a weird system,
 * allow a way to override the automatic disabling of MPX.
 */
static int forcempx;

static int __init forcempx_setup(char *__unused)
{
	forcempx = 1;

	return 1;
}
__setup("intel-skd-046-workaround=disable", forcempx_setup);

void check_mpx_erratum(struct cpuinfo_x86 *c)
{
	if (forcempx)
		return;
	/*
	 * Turn off the MPX feature on CPUs where SMEP is not
	 * available or disabled.
	 *
	 * Works around Intel Erratum SKD046: "Branch Instructions
	 * May Initialize MPX Bound Registers Incorrectly".
	 *
	 * This might falsely disable MPX on systems without
	 * SMEP, like Atom processors without SMEP. But there
	 * is no such hardware known at the moment.
	 */
	if (cpu_has(c, X86_FEATURE_MPX) && !cpu_has(c, X86_FEATURE_SMEP)) {
		setup_clear_cpu_cap(X86_FEATURE_MPX);
		pr_warn("x86/mpx: Disabling MPX since SMEP not present\n");
	}
}

static bool ring3mwait_disabled __read_mostly;

static int __init ring3mwait_disable(char *__unused)
{
	ring3mwait_disabled = true;
	return 0;
}
__setup("ring3mwait=disable", ring3mwait_disable);

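/*
 * Enable ring 3 MONITOR/MWAIT on the Xeon Phi (KNL/KNM) parts unless
 * "ring3mwait=disable" was given on the command line.  The enable bit is
 * recorded in msr_misc_features_shadow so that init_intel_misc_features()
 * writes it to MSR_MISC_FEATURES_ENABLES together with the other bits.
 */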
static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
{
	/*
	 * Ring 3 MONITOR/MWAIT feature cannot be detected without
	 * cpu model and family comparison.
	 */
	if (c->x86 != 6)
		return;
	switch (c->x86_model) {
	case INTEL_FAM6_XEON_PHI_KNL:
	case INTEL_FAM6_XEON_PHI_KNM:
		break;
	default:
		return;
	}

	if (ring3mwait_disabled)
		return;

	set_cpu_cap(c, X86_FEATURE_RING3MWAIT);
	this_cpu_or(msr_misc_features_shadow,
		    1UL << MSR_MISC_FEATURES_ENABLES_RING3MWAIT_BIT);

	if (c == &boot_cpu_data)
		ELF_HWCAP2 |= HWCAP2_RING3MWAIT;
}

/*
 * Early microcode releases for the Spectre v2 mitigation were broken.
 * Information taken from:
 * - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/01/microcode-update-guidance.pdf
 * - https://kb.vmware.com/s/article/52345
 * - Microcode revisions observed in the wild
 * - Release note from 20180108 microcode release
 */
struct sku_microcode {
	u8 model;
	u8 stepping;
	u32 microcode;
};
static const struct sku_microcode spectre_bad_microcodes[] = {
	{ INTEL_FAM6_KABYLAKE_DESKTOP, 0x0B, 0x84 },
	{ INTEL_FAM6_KABYLAKE_DESKTOP, 0x0A, 0x84 },
	{ INTEL_FAM6_KABYLAKE_DESKTOP, 0x09, 0x84 },
	{ INTEL_FAM6_KABYLAKE_MOBILE, 0x0A, 0x84 },
	{ INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x84 },
	{ INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e },
	{ INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c },
	{ INTEL_FAM6_SKYLAKE_MOBILE, 0x03, 0xc2 },
	{ INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0xc2 },
	{ INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 },
	{ INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b },
	{ INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 },
	{ INTEL_FAM6_BROADWELL_XEON_D, 0x03, 0x07000011 },
	{ INTEL_FAM6_BROADWELL_X, 0x01, 0x0b000025 },
	{ INTEL_FAM6_HASWELL_ULT, 0x01, 0x21 },
	{ INTEL_FAM6_HASWELL_GT3E, 0x01, 0x18 },
	{ INTEL_FAM6_HASWELL_CORE, 0x03, 0x23 },
	{ INTEL_FAM6_HASWELL_X, 0x02, 0x3b },
	{ INTEL_FAM6_HASWELL_X, 0x04, 0x10 },
	{ INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a },
	/* Updated in the 20180108 release; blacklist until we know otherwise */
	{ INTEL_FAM6_ATOM_GEMINI_LAKE, 0x01, 0x22 },
	/* Observed in the wild */
	{ INTEL_FAM6_SANDYBRIDGE_X, 0x06, 0x61b },
	{ INTEL_FAM6_SANDYBRIDGE_X, 0x07, 0x712 },
};

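/*
 * Returns true when the currently loaded microcode revision is at or below
 * a blacklisted revision for this model/stepping, i.e. it may still carry
 * the broken early Spectre v2 microcode listed above.
 */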
static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
		if (c->x86_model == spectre_bad_microcodes[i].model &&
		    c->x86_mask == spectre_bad_microcodes[i].stepping)
			return (c->microcode <= spectre_bad_microcodes[i].microcode);
	}
	return false;
}

static void early_init_intel(struct cpuinfo_x86 *c)
{
	u64 misc_enable;

	/* Unmask CPUID levels if masked: */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
				  MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) {
			c->cpuid_level = cpuid_eax(0);
			get_cpu_cap(c);
		}
	}

	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
		c->microcode = intel_get_microcode_revision();

	if ((cpu_has(c, X86_FEATURE_SPEC_CTRL) ||
	     cpu_has(c, X86_FEATURE_STIBP) ||
	     cpu_has(c, X86_FEATURE_AMD_SPEC_CTRL) ||
	     cpu_has(c, X86_FEATURE_AMD_PRED_CMD) ||
	     cpu_has(c, X86_FEATURE_AMD_STIBP)) && bad_spectre_microcode(c)) {
		pr_warn("Intel Spectre v2 broken microcode detected; disabling SPEC_CTRL\n");
		clear_cpu_cap(c, X86_FEATURE_SPEC_CTRL);
		clear_cpu_cap(c, X86_FEATURE_STIBP);
		clear_cpu_cap(c, X86_FEATURE_AMD_SPEC_CTRL);
		clear_cpu_cap(c, X86_FEATURE_AMD_PRED_CMD);
		clear_cpu_cap(c, X86_FEATURE_AMD_STIBP);
	}

	/*
	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
	 *
	 * A race condition between speculative fetches and invalidating
	 * a large page.  This is worked around in microcode, but we
	 * need the microcode to have already been loaded... so if it is
	 * not, recommend a BIOS update and disable large pages.
	 */
	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
	    c->microcode < 0x20e) {
		pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
		clear_cpu_cap(c, X86_FEATURE_PSE);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif

	/* CPUID workaround for 0F33/0F34 CPU */
	if (c->x86 == 0xF && c->x86_model == 0x3 &&
	    (c->x86_mask == 0x3 || c->x86_mask == 0x4))
		c->x86_phys_bits = 36;

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states.
	 *
	 * It is also reliable across cores and sockets. (but not across
	 * cabinets - we turn it off in that case explicitly.)
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Penwell and Cloverview have a TSC that keeps running across S3 */
	if (c->x86 == 6) {
		switch (c->x86_model) {
		case 0x27:	/* Penwell */
		case 0x35:	/* Cloverview */
		case 0x4a:	/* Merrifield */
			set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
			break;
		default:
			break;
		}
	}

	/*
	 * There is a known erratum on Pentium III and Core Solo
	 * and Core Duo CPUs.
	 * " Page with PAT set to WC while associated MTRR is UC
	 * may consolidate to UC "
	 * Because of this erratum, it is better to stick with
	 * setting WC in MTRR rather than using PAT on these CPUs.
	 *
	 * Enable PAT WC only on P4, Core 2 or later CPUs.
	 */
	if (c->x86 == 6 && c->x86_model < 15)
		clear_cpu_cap(c, X86_FEATURE_PAT);

#ifdef CONFIG_KMEMCHECK
	/*
	 * P4s have a "fast strings" feature which causes single-
	 * stepping REP instructions to only generate a #DB on
	 * cache-line boundaries.
	 *
	 * Ingo Molnar reported a Pentium D (model 6) and a Xeon
	 * (model 2) with the same problem.
	 */
	if (c->x86 == 15)
		if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
				  MSR_IA32_MISC_ENABLE_FAST_STRING_BIT) > 0)
			pr_info("kmemcheck: Disabling fast string operations\n");
#endif

	/*
	 * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
	 * clear the fast string and enhanced fast string CPU capabilities.
	 */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
			pr_info("Disabled fast string operations\n");
			setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
			setup_clear_cpu_cap(X86_FEATURE_ERMS);
		}
	}

	/*
	 * Intel Quark Core DevMan_001.pdf section 6.4.11
	 * "The operating system also is required to invalidate (i.e., flush)
	 * the TLB when any changes are made to any of the page table entries.
	 * The operating system must reload CR3 to cause the TLB to be flushed"
	 *
	 * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h
	 * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
	 * to be modified.
	 */
	if (c->x86 == 5 && c->x86_model == 9) {
		pr_info("Disabling PGE capability bit\n");
		setup_clear_cpu_cap(X86_FEATURE_PGE);
	}

	if (c->cpuid_level >= 0x00000001) {
		u32 eax, ebx, ecx, edx;

		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
		/*
		 * If HTT (EDX[28]) is set EBX[16:23] contain the number of
		 * apicids which are reserved per package. Store the resulting
		 * shift value for the package management code.
		 */
		if (edx & (1U << 28))
			c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
	}

	check_mpx_erratum(c);
}

#ifdef CONFIG_X86_32
/*
 * Early probe support logic for ppro memory erratum #50
 *
 * This is called before we do cpu ident work
 */

int ppro_with_ram_bug(void)
{
	/* Uses data from early_cpu_detect now */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_data.x86 == 6 &&
	    boot_cpu_data.x86_model == 1 &&
	    boot_cpu_data.x86_mask < 8) {
		pr_info("Pentium Pro with Errata#50 detected. Taking evasive action.\n");
		return 1;
	}
	return 0;
}

static void intel_smp_check(struct cpuinfo_x86 *c)
{
	/* Is this call coming from identify_secondary_cpu()? */
	if (!c->cpu_index)
		return;

	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86 == 5 &&
	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
	    c->x86_model <= 3) {
		/*
		 * Remember we have B step Pentia with bugs
		 */
		WARN_ONCE(1, "WARNING: SMP operation may be unreliable "
			     "with B stepping processors.\n");
	}
}

static int forcepae;
static int __init forcepae_setup(char *__unused)
{
	forcepae = 1;
	return 1;
}
__setup("forcepae", forcepae_setup);

static void intel_workarounds(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_F00F_BUG
	/*
	 * All models of Pentium and Pentium with MMX technology CPUs
	 * have the F0 0F bug, which lets nonprivileged users lock up the
	 * system. Announce that the fault handler will be checking for it.
	 * The Quark is also family 5, but does not have the same bug.
	 */
	clear_cpu_bug(c, X86_BUG_F00F);
	if (c->x86 == 5 && c->x86_model < 9) {
		static int f00f_workaround_enabled;

		set_cpu_bug(c, X86_BUG_F00F);
		if (!f00f_workaround_enabled) {
			pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n");
			f00f_workaround_enabled = 1;
		}
	}
#endif

	/*
	 * SEP CPUID bug: Pentium Pro reports SEP but doesn't have it until
	 * model 3 mask 3
	 */
	if ((c->x86<<8 | c->x86_model<<4 | c->x86_mask) < 0x633)
		clear_cpu_cap(c, X86_FEATURE_SEP);

	/*
	 * PAE CPUID issue: many Pentium M report no PAE but may have a
	 * functionally usable PAE implementation.
	 * Forcefully enable PAE if kernel parameter "forcepae" is present.
	 */
	if (forcepae) {
		pr_warn("PAE forced!\n");
		set_cpu_cap(c, X86_FEATURE_PAE);
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE);
	}

	/*
	 * P4 Xeon erratum 037 workaround.
	 * Hardware prefetcher may cause stale data to be loaded into the cache.
	 */
	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
		if (msr_set_bit(MSR_IA32_MISC_ENABLE,
				MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) {
			pr_info("CPU: C0 stepping P4 Xeon detected.\n");
			pr_info("CPU: Disabling hardware prefetching (Erratum 037)\n");
		}
	}

	/*
	 * See if we have a good local APIC by checking for buggy Pentia,
	 * i.e. all B steppings and the C2 stepping of P54C when using their
	 * integrated APIC (see 11AP erratum in "Pentium Processor
	 * Specification Update").
	 */
	if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 &&
	    (c->x86_mask < 0x6 || c->x86_mask == 0xb))
		set_cpu_bug(c, X86_BUG_11AP);

#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Set up the preferred alignment for movsl bulk memory moves
	 */
	switch (c->x86) {
	case 4:		/* 486: untested */
		break;
	case 5:		/* Old Pentia: untested */
		break;
	case 6:		/* PII/PIII only like movsl with 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	case 15:	/* P4 is OK down to 8-byte alignment */
		movsl_mask.mask = 7;
		break;
	}
#endif

	intel_smp_check(c);
}
#else
static void intel_workarounds(struct cpuinfo_x86 *c)
{
}
#endif

static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	unsigned node;
	int cpu = smp_processor_id();

	/*
	 * Don't do the funky fallback heuristics the AMD version employs
	 * for now.
	 */
	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE || !node_online(node)) {
		/* reuse the value from init_cpu_to_node() */
		node = cpu_to_node(cpu);
	}
	numa_set_node(cpu, node);
#endif
}

/*
 * find out the number of processor cores on the die
 */
static int intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
	unsigned int eax, ebx, ecx, edx;

	if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4)
		return 1;

	/* Intel has a non-standard dependency on %ecx for this CPUID level. */
	cpuid_count(4, 0, &eax, &ebx, &ecx, &edx);
	if (eax & 0x1f)
		return (eax >> 26) + 1;
	else
		return 1;
}

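/*
 * Probe the VMX capability MSRs and turn the optional execution controls
 * (TPR shadow, virtual NMIs, FlexPriority, EPT, VPID) into CPU feature bits.
 */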
static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
{
	/* Intel VMX MSR indicated features */
#define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW	0x00200000
#define X86_VMX_FEATURE_PROC_CTLS_VNMI		0x00400000
#define X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS	0x80000000
#define X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC	0x00000001
#define X86_VMX_FEATURE_PROC_CTLS2_EPT		0x00000002
#define X86_VMX_FEATURE_PROC_CTLS2_VPID		0x00000020

	u32 vmx_msr_low, vmx_msr_high, msr_ctl, msr_ctl2;

	clear_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	clear_cpu_cap(c, X86_FEATURE_VNMI);
	clear_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
	clear_cpu_cap(c, X86_FEATURE_EPT);
	clear_cpu_cap(c, X86_FEATURE_VPID);

	rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, vmx_msr_low, vmx_msr_high);
	msr_ctl = vmx_msr_high | vmx_msr_low;
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW)
		set_cpu_cap(c, X86_FEATURE_TPR_SHADOW);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_VNMI)
		set_cpu_cap(c, X86_FEATURE_VNMI);
	if (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_2ND_CTLS) {
		rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
		      vmx_msr_low, vmx_msr_high);
		msr_ctl2 = vmx_msr_high | vmx_msr_low;
		if ((msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VIRT_APIC) &&
		    (msr_ctl & X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW))
			set_cpu_cap(c, X86_FEATURE_FLEXPRIORITY);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_EPT)
			set_cpu_cap(c, X86_FEATURE_EPT);
		if (msr_ctl2 & X86_VMX_FEATURE_PROC_CTLS2_VPID)
			set_cpu_cap(c, X86_FEATURE_VPID);
	}
}

static void init_intel_energy_perf(struct cpuinfo_x86 *c)
{
	u64 epb;

	/*
	 * Initialize MSR_IA32_ENERGY_PERF_BIAS if not already initialized.
	 * (x86_energy_perf_policy(8) is available to change it at run-time.)
	 */
	if (!cpu_has(c, X86_FEATURE_EPB))
		return;

	rdmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
	if ((epb & 0xF) != ENERGY_PERF_BIAS_PERFORMANCE)
		return;

	pr_warn_once("ENERGY_PERF_BIAS: Set to 'normal', was 'performance'\n");
	pr_warn_once("ENERGY_PERF_BIAS: View and update with x86_energy_perf_policy(8)\n");
	epb = (epb & ~0xF) | ENERGY_PERF_BIAS_NORMAL;
	wrmsrl(MSR_IA32_ENERGY_PERF_BIAS, epb);
}

static void intel_bsp_resume(struct cpuinfo_x86 *c)
{
	/*
	 * MSR_IA32_ENERGY_PERF_BIAS is lost across suspend/resume,
	 * so reinitialize it properly like during bootup:
	 */
	init_intel_energy_perf(c);
}

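/* Set X86_FEATURE_CPUID_FAULT when MSR_PLATFORM_INFO advertises CPUID faulting. */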
static void init_cpuid_fault(struct cpuinfo_x86 *c)
{
	u64 msr;

	if (!rdmsrl_safe(MSR_PLATFORM_INFO, &msr)) {
		if (msr & MSR_PLATFORM_INFO_CPUID_FAULT)
			set_cpu_cap(c, X86_FEATURE_CPUID_FAULT);
	}
}

static void init_intel_misc_features(struct cpuinfo_x86 *c)
{
	u64 msr;

	if (rdmsrl_safe(MSR_MISC_FEATURES_ENABLES, &msr))
		return;

	/* Clear all MISC features */
	this_cpu_write(msr_misc_features_shadow, 0);

	/* Check features and update capabilities and shadow control bits */
	init_cpuid_fault(c);
	probe_xeon_phi_r3mwait(c);

	msr = this_cpu_read(msr_misc_features_shadow);
	wrmsrl(MSR_MISC_FEATURES_ENABLES, msr);
}

static void init_intel(struct cpuinfo_x86 *c)
{
	unsigned int l2 = 0;

	early_init_intel(c);

	intel_workarounds(c);

	/*
	 * Detect the extended topology information if available. This
	 * will reinitialise the initial_apicid which will be used
	 * in init_intel_cacheinfo()
	 */
	detect_extended_topology(c);

	if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
		/*
		 * let's use the legacy cpuid vector 0x1 and 0x4 for topology
		 * detection.
		 */
		c->x86_max_cores = intel_num_cpu_cores(c);
#ifdef CONFIG_X86_32
		detect_ht(c);
#endif
	}

	l2 = init_intel_cacheinfo(c);

	/* Detect legacy cache sizes if init_intel_cacheinfo did not */
	if (l2 == 0) {
		cpu_detect_cache_sizes(c);
		l2 = c->x86_cache_size;
	}

	if (c->cpuid_level > 9) {
		unsigned eax = cpuid_eax(10);

		/* Check for version and the number of counters */
		if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
			set_cpu_cap(c, X86_FEATURE_ARCH_PERFMON);
	}

	if (cpu_has(c, X86_FEATURE_XMM2))
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);

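	/*
	 * MSR_IA32_MISC_ENABLE bits 11/12 are the BTS/PEBS "unavailable"
	 * flags; advertise X86_FEATURE_BTS and X86_FEATURE_PEBS only when
	 * the corresponding bit is clear.
	 */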
	if (boot_cpu_has(X86_FEATURE_DS)) {
		unsigned int l1;

		rdmsr(MSR_IA32_MISC_ENABLE, l1, l2);
		if (!(l1 & (1<<11)))
			set_cpu_cap(c, X86_FEATURE_BTS);
		if (!(l1 & (1<<12)))
			set_cpu_cap(c, X86_FEATURE_PEBS);
	}

	if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_CLFLUSH) &&
	    (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
		set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR);

	if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_MWAIT) &&
	    c->x86_model == INTEL_FAM6_ATOM_GOLDMONT)
		set_cpu_bug(c, X86_BUG_MONITOR);

#ifdef CONFIG_X86_64
	if (c->x86 == 15)
		c->x86_cache_alignment = c->x86_clflush_size * 2;
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_REP_GOOD);
#else
	/*
	 * Names for the Pentium II/Celeron processors
	 * detectable only by also checking the cache size.
	 * Dixon is NOT a Celeron.
	 */
	if (c->x86 == 6) {
		char *p = NULL;

		switch (c->x86_model) {
		case 5:
			if (l2 == 0)
				p = "Celeron (Covington)";
			else if (l2 == 256)
				p = "Mobile Pentium II (Dixon)";
			break;

		case 6:
			if (l2 == 128)
				p = "Celeron (Mendocino)";
			else if (c->x86_mask == 0 || c->x86_mask == 5)
				p = "Celeron-A";
			break;

		case 8:
			if (l2 == 128)
				p = "Celeron (Coppermine)";
			break;
		}

		if (p)
			strcpy(c->x86_model_id, p);
	}

	if (c->x86 == 15)
		set_cpu_cap(c, X86_FEATURE_P4);
	if (c->x86 == 6)
		set_cpu_cap(c, X86_FEATURE_P3);
#endif

	/* Work around errata */
	srat_detect_node(c);

	if (cpu_has(c, X86_FEATURE_VMX))
		detect_vmx_virtcap(c);

	init_intel_energy_perf(c);

	init_intel_misc_features(c);
}

#ifdef CONFIG_X86_32
static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/*
	 * Intel PIII Tualatin. This comes in two flavours.
	 * One has 256kb of cache, the other 512. We have no way
	 * to determine which, so we use a boottime override
	 * for the 512kb model, and assume 256 otherwise.
	 */
	if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0))
		size = 256;

	/*
	 * Intel Quark SoC X1000 contains a 4-way set associative
	 * 16K cache with a 16 byte cache line and 256 lines per tag
	 */
	if ((c->x86 == 5) && (c->x86_model == 9))
		size = 16;
	return size;
}
#endif

#define TLB_INST_4K	0x01
#define TLB_INST_4M	0x02
#define TLB_INST_2M_4M	0x03

#define TLB_INST_ALL	0x05
#define TLB_INST_1G	0x06

#define TLB_DATA_4K	0x11
#define TLB_DATA_4M	0x12
#define TLB_DATA_2M_4M	0x13
#define TLB_DATA_4K_4M	0x14

#define TLB_DATA_1G	0x16

#define TLB_DATA0_4K	0x21
#define TLB_DATA0_4M	0x22
#define TLB_DATA0_2M_4M	0x23

#define STLB_4K		0x41
#define STLB_4K_2M	0x42

static const struct _tlb_table intel_tlb_table[] = {
	{ 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0x02, TLB_INST_4M, 2, " TLB_INST 4 MByte pages, fully associative" },
	{ 0x03, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0x04, TLB_DATA_4M, 8, " TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x05, TLB_DATA_4M, 32, " TLB_DATA 4 MByte pages, 4-way set associative" },
	{ 0x0b, TLB_INST_4M, 4, " TLB_INST 4 MByte pages, 4-way set associative" },
	{ 0x4f, TLB_INST_4K, 32, " TLB_INST 4 KByte pages" },
	{ 0x50, TLB_INST_ALL, 64, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x51, TLB_INST_ALL, 128, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x52, TLB_INST_ALL, 256, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
	{ 0x55, TLB_INST_2M_4M, 7, " TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0x56, TLB_DATA0_4M, 16, " TLB_DATA0 4 MByte pages, 4-way set associative" },
	{ 0x57, TLB_DATA0_4K, 16, " TLB_DATA0 4 KByte pages, 4-way associative" },
	{ 0x59, TLB_DATA0_4K, 16, " TLB_DATA0 4 KByte pages, fully associative" },
	{ 0x5a, TLB_DATA0_2M_4M, 32, " TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
	{ 0x5b, TLB_DATA_4K_4M, 64, " TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5c, TLB_DATA_4K_4M, 128, " TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" },
	{ 0x61, TLB_INST_4K, 48, " TLB_INST 4 KByte pages, fully associative" },
	{ 0x63, TLB_DATA_1G, 4, " TLB_DATA 1 GByte pages, 4-way set associative" },
	{ 0x76, TLB_INST_2M_4M, 8, " TLB_INST 2-MByte or 4-MByte pages, fully associative" },
	{ 0xb0, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 4-way set associative" },
	{ 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
	{ 0xb2, TLB_INST_4K, 64, " TLB_INST 4KByte pages, 4-way set associative" },
	{ 0xb3, TLB_DATA_4K, 128, " TLB_DATA 4 KByte pages, 4-way set associative" },
	{ 0xb4, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xb5, TLB_INST_4K, 64, " TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xb6, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 8-way set associative" },
	{ 0xba, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way associative" },
	{ 0xc0, TLB_DATA_4K_4M, 8, " TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
	{ 0xc1, STLB_4K_2M, 1024, " STLB 4 KByte and 2 MByte pages, 8-way associative" },
	{ 0xc2, TLB_DATA_2M_4M, 16, " DTLB 2 MByte/4MByte pages, 4-way associative" },
	{ 0xca, STLB_4K, 512, " STLB 4 KByte pages, 4-way associative" },
	{ 0x00, 0, 0 }
};

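/*
 * Fold a single CPUID leaf 0x2 descriptor byte into the instruction and
 * data TLB entry counts (the tlb_lli_* and tlb_lld_* arrays), keeping the
 * largest value seen for each page size.
 */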
static void intel_tlb_lookup(const unsigned char desc)
{
	unsigned char k;

	if (desc == 0)
		return;

	/* look up this descriptor in the table */
	for (k = 0; intel_tlb_table[k].descriptor != desc &&
	     intel_tlb_table[k].descriptor != 0; k++)
		;

	if (intel_tlb_table[k].tlb_type == 0)
		return;

	switch (intel_tlb_table[k].tlb_type) {
	case STLB_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case STLB_4K_2M:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_ALL:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4K:
		if (tlb_lli_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_4M:
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_INST_2M_4M:
		if (tlb_lli_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lli_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lli_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4K:
	case TLB_DATA0_4K:
		if (tlb_lld_4k[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4k[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_4M:
	case TLB_DATA0_4M:
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_2M_4M:
	case TLB_DATA0_2M_4M:
		if (tlb_lld_2m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_2m[ENTRIES] = intel_tlb_table[k].entries;
		if (tlb_lld_4m[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_4m[ENTRIES] = intel_tlb_table[k].entries;
		break;
	case TLB_DATA_1G:
		if (tlb_lld_1g[ENTRIES] < intel_tlb_table[k].entries)
			tlb_lld_1g[ENTRIES] = intel_tlb_table[k].entries;
		break;
	}
}

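/*
 * Walk the descriptor bytes returned by CPUID leaf 0x2 and feed each one
 * to intel_tlb_lookup() to size the TLBs.
 */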
static void intel_detect_tlb(struct cpuinfo_x86 *c)
{
	int i, j, n;
	unsigned int regs[4];
	unsigned char *desc = (unsigned char *)regs;

	if (c->cpuid_level < 2)
		return;

	/* Number of times to iterate */
	n = cpuid_eax(2) & 0xFF;

	for (i = 0 ; i < n ; i++) {
		cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

		/* If bit 31 is set, this is an unknown format */
		for (j = 0 ; j < 3 ; j++)
			if (regs[j] & (1 << 31))
				regs[j] = 0;

		/* Byte 0 is level count, not a descriptor */
		for (j = 1 ; j < 16 ; j++)
			intel_tlb_lookup(desc[j]);
	}
}

static const struct cpu_dev intel_cpu_dev = {
	.c_vendor	= "Intel",
	.c_ident	= { "GenuineIntel" },
#ifdef CONFIG_X86_32
	.legacy_models = {
		{ .family = 4, .model_names =
		  {
			  [0] = "486 DX-25/33",
			  [1] = "486 DX-50",
			  [2] = "486 SX",
			  [3] = "486 DX/2",
			  [4] = "486 SL",
			  [5] = "486 SX/2",
			  [7] = "486 DX/2-WB",
			  [8] = "486 DX/4",
			  [9] = "486 DX/4-WB"
		  }
		},
		{ .family = 5, .model_names =
		  {
			  [0] = "Pentium 60/66 A-step",
			  [1] = "Pentium 60/66",
			  [2] = "Pentium 75 - 200",
			  [3] = "OverDrive PODP5V83",
			  [4] = "Pentium MMX",
			  [7] = "Mobile Pentium 75 - 200",
			  [8] = "Mobile Pentium MMX",
			  [9] = "Quark SoC X1000",
		  }
		},
		{ .family = 6, .model_names =
		  {
			  [0] = "Pentium Pro A-step",
			  [1] = "Pentium Pro",
			  [3] = "Pentium II (Klamath)",
			  [4] = "Pentium II (Deschutes)",
			  [5] = "Pentium II (Deschutes)",
			  [6] = "Mobile Pentium II",
			  [7] = "Pentium III (Katmai)",
			  [8] = "Pentium III (Coppermine)",
			  [10] = "Pentium III (Cascades)",
			  [11] = "Pentium III (Tualatin)",
		  }
		},
		{ .family = 15, .model_names =
		  {
			  [0] = "Pentium 4 (Unknown)",
			  [1] = "Pentium 4 (Willamette)",
			  [2] = "Pentium 4 (Northwood)",
			  [4] = "Pentium 4 (Foster)",
			  [5] = "Pentium 4 (Foster)",
		  }
		},
	},
	.legacy_cache_size = intel_size_cache,
#endif
	.c_detect_tlb	= intel_detect_tlb,
	.c_early_init	= early_init_intel,
	.c_init		= init_intel,
	.c_bsp_resume	= intel_bsp_resume,
	.c_x86_vendor	= X86_VENDOR_INTEL,
};

cpu_dev_register(intel_cpu_dev);