Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /* |
6b44e72a | 2 | * Intel CPU Microcode Update Driver for Linux |
1da177e4 | 3 | * |
cea58224 | 4 | * Copyright (C) 2000-2006 Tigran Aivazian <aivazian.tigran@gmail.com> |
6b44e72a | 5 | * 2006 Shaohua Li <shaohua.li@intel.com> |
1da177e4 | 6 | * |
fe055896 BP |
7 | * Intel CPU microcode early update for Linux |
8 | * | |
9 | * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com> | |
10 | * H Peter Anvin <hpa@zytor.com> | |
11 | * | |
6b44e72a BP |
12 | * This program is free software; you can redistribute it and/or |
13 | * modify it under the terms of the GNU General Public License | |
14 | * as published by the Free Software Foundation; either version | |
15 | * 2 of the License, or (at your option) any later version. | |
1da177e4 | 16 | */ |
f58e1f53 | 17 | |
fe055896 BP |
18 | /* |
19 | * This needs to be before all headers so that pr_debug in printk.h doesn't turn | |
20 | * printk calls into no_printk(). | |
21 | * | |
22 | *#define DEBUG | |
23 | */ | |
6b26e1bf | 24 | #define pr_fmt(fmt) "microcode: " fmt |
f58e1f53 | 25 | |
fe055896 | 26 | #include <linux/earlycpio.h> |
4bae1967 | 27 | #include <linux/firmware.h> |
4bae1967 | 28 | #include <linux/uaccess.h> |
fe055896 BP |
29 | #include <linux/vmalloc.h> |
30 | #include <linux/initrd.h> | |
4bae1967 | 31 | #include <linux/kernel.h> |
fe055896 BP |
32 | #include <linux/slab.h> |
33 | #include <linux/cpu.h> | |
34 | #include <linux/mm.h> | |
1da177e4 | 35 | |
9cd4d78e | 36 | #include <asm/microcode_intel.h> |
723f2828 | 37 | #include <asm/intel-family.h> |
4bae1967 | 38 | #include <asm/processor.h> |
fe055896 BP |
39 | #include <asm/tlbflush.h> |
40 | #include <asm/setup.h> | |
4bae1967 | 41 | #include <asm/msr.h> |
1da177e4 | 42 | |
/* Path of the Intel microcode blob inside the builtin/initrd firmware cpio. */
static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";

/* Current microcode patch used in early patching on the APs. */
static struct microcode_intel *intel_ucode_patch;

/* last level cache size per core */
static int llc_size_per_core;
50 | ||
8027923a BP |
/*
 * Check whether a CPU signature/processor-flags pair (@s1, @p1) matches a
 * microcode signature/flags pair (@s2, @p2).
 *
 * The signatures must be identical. Processor flags match when both are
 * zero, or when they share at least one set bit.
 */
static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
					unsigned int s2, unsigned int p2)
{
	if (s1 != s2)
		return false;

	/* Both flag words zero counts as a match ... */
	if (p1 == 0 && p2 == 0)
		return true;

	/* ... otherwise they must have a common bit. */
	return (p1 & p2) != 0;
}
64 | ||
/*
 * Returns 1 if microcode patch @mc matches CPU signature @csig / processor
 * flags @cpf — either via the primary header or via one of the entries in
 * the optional extended signature table. Returns 0 otherwise.
 */
static int find_matching_signature(void *mc, unsigned int csig, int cpf)
{
	struct microcode_header_intel *mc_hdr = mc;
	struct extended_sigtable *ext_hdr;
	struct extended_signature *ext_sig;
	int i;

	/* Primary signature in the patch header: */
	if (cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf))
		return 1;

	/* Look for ext. headers: */
	if (get_totalsize(mc_hdr) <= get_datasize(mc_hdr) + MC_HEADER_SIZE)
		return 0;

	/* Extended table follows the data section. */
	ext_hdr = mc + get_datasize(mc_hdr) + MC_HEADER_SIZE;
	ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE;

	for (i = 0; i < ext_hdr->count; i++) {
		if (cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf))
			return 1;
		ext_sig++;
	}
	return 0;
}
92 | ||
/*
 * Returns 1 if @mc carries a revision strictly newer than @new_rev AND
 * matches CPU signature @csig / processor flags @cpf, 0 otherwise.
 */
static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev)
{
	struct microcode_header_intel *mc_hdr = mc;

	/* Not an upgrade over what we already have. */
	if (mc_hdr->rev <= new_rev)
		return 0;

	return find_matching_signature(mc, csig, cpf);
}
105 | ||
fe055896 BP |
/*
 * Given CPU signature and a microcode patch, this function finds if the
 * microcode patch has matching family and model with the CPU.
 *
 * Note: deliberately coarser than find_matching_signature() — only
 * family/model are compared, not stepping or processor flags.
 *
 * %true - if there's a match
 * %false - otherwise
 */
static bool microcode_matches(struct microcode_header_intel *mc_header,
			      unsigned long sig)
{
	unsigned long total_size = get_totalsize(mc_header);
	unsigned long data_size = get_datasize(mc_header);
	struct extended_sigtable *ext_header;
	unsigned int fam_ucode, model_ucode;
	struct extended_signature *ext_sig;
	unsigned int fam, model;
	int ext_sigcount, i;

	fam   = x86_family(sig);
	model = x86_model(sig);

	fam_ucode   = x86_family(mc_header->sig);
	model_ucode = x86_model(mc_header->sig);

	if (fam == fam_ucode && model == model_ucode)
		return true;

	/* Look for ext. headers: */
	if (total_size <= data_size + MC_HEADER_SIZE)
		return false;

	ext_header   = (void *) mc_header + data_size + MC_HEADER_SIZE;
	ext_sig      = (void *)ext_header + EXT_HEADER_SIZE;
	ext_sigcount = ext_header->count;

	/* Compare family/model against every extended signature too. */
	for (i = 0; i < ext_sigcount; i++) {
		fam_ucode   = x86_family(ext_sig->sig);
		model_ucode = x86_model(ext_sig->sig);

		if (fam == fam_ucode && model == model_ucode)
			return true;

		ext_sig++;
	}
	return false;
}
152 | ||
aa78c1cc | 153 | static struct ucode_patch *memdup_patch(void *data, unsigned int size) |
fe055896 | 154 | { |
06b8534c | 155 | struct ucode_patch *p; |
fe055896 | 156 | |
9fcf5ba2 | 157 | p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL); |
06b8534c | 158 | if (!p) |
aa78c1cc | 159 | return NULL; |
fe055896 | 160 | |
06b8534c BP |
161 | p->data = kmemdup(data, size, GFP_KERNEL); |
162 | if (!p->data) { | |
163 | kfree(p); | |
aa78c1cc | 164 | return NULL; |
fe055896 BP |
165 | } |
166 | ||
06b8534c | 167 | return p; |
fe055896 BP |
168 | } |
169 | ||
06b8534c | 170 | static void save_microcode_patch(void *data, unsigned int size) |
fe055896 BP |
171 | { |
172 | struct microcode_header_intel *mc_hdr, *mc_saved_hdr; | |
bd207330 | 173 | struct ucode_patch *iter, *tmp, *p = NULL; |
06b8534c | 174 | bool prev_found = false; |
fe055896 | 175 | unsigned int sig, pf; |
fe055896 | 176 | |
06b8534c | 177 | mc_hdr = (struct microcode_header_intel *)data; |
fe055896 | 178 | |
06b8534c BP |
179 | list_for_each_entry_safe(iter, tmp, µcode_cache, plist) { |
180 | mc_saved_hdr = (struct microcode_header_intel *)iter->data; | |
fe055896 BP |
181 | sig = mc_saved_hdr->sig; |
182 | pf = mc_saved_hdr->pf; | |
183 | ||
06b8534c BP |
184 | if (find_matching_signature(data, sig, pf)) { |
185 | prev_found = true; | |
fe055896 | 186 | |
06b8534c BP |
187 | if (mc_hdr->rev <= mc_saved_hdr->rev) |
188 | continue; | |
fe055896 | 189 | |
aa78c1cc BP |
190 | p = memdup_patch(data, size); |
191 | if (!p) | |
06b8534c BP |
192 | pr_err("Error allocating buffer %p\n", data); |
193 | else | |
194 | list_replace(&iter->plist, &p->plist); | |
195 | } | |
fe055896 BP |
196 | } |
197 | ||
06b8534c BP |
198 | /* |
199 | * There weren't any previous patches found in the list cache; save the | |
200 | * newly found. | |
201 | */ | |
202 | if (!prev_found) { | |
aa78c1cc BP |
203 | p = memdup_patch(data, size); |
204 | if (!p) | |
06b8534c BP |
205 | pr_err("Error allocating buffer for %p\n", data); |
206 | else | |
207 | list_add_tail(&p->plist, µcode_cache); | |
208 | } | |
bd207330 | 209 | |
aa78c1cc BP |
210 | if (!p) |
211 | return; | |
212 | ||
bd207330 BP |
213 | /* |
214 | * Save for early loading. On 32-bit, that needs to be a physical | |
215 | * address as the APs are running from physical addresses, before | |
216 | * paging has been enabled. | |
217 | */ | |
aa78c1cc BP |
218 | if (IS_ENABLED(CONFIG_X86_32)) |
219 | intel_ucode_patch = (struct microcode_intel *)__pa_nodebug(p->data); | |
220 | else | |
221 | intel_ucode_patch = p->data; | |
fe055896 BP |
222 | } |
223 | ||
8027923a BP |
/*
 * Validate a single microcode patch blob: size fields, header/loader
 * versions, extended signature table layout, and all checksums.
 *
 * Returns 0 if @mc is well-formed, a negative error code otherwise.
 * @print_err: non-zero to log the reason for rejection.
 */
static int microcode_sanity_check(void *mc, int print_err)
{
	unsigned long total_size, data_size, ext_table_size;
	struct microcode_header_intel *mc_header = mc;
	struct extended_sigtable *ext_header = NULL;
	u32 sum, orig_sum, ext_sigcount = 0, i;
	struct extended_signature *ext_sig;

	total_size = get_totalsize(mc_header);
	data_size = get_datasize(mc_header);

	/* Data section + header must fit inside the declared total size. */
	if (data_size + MC_HEADER_SIZE > total_size) {
		if (print_err)
			pr_err("Error: bad microcode data file size.\n");
		return -EINVAL;
	}

	/* Only version 1 headers/loaders are understood here. */
	if (mc_header->ldrver != 1 || mc_header->hdrver != 1) {
		if (print_err)
			pr_err("Error: invalid/unknown microcode update format.\n");
		return -EINVAL;
	}

	ext_table_size = total_size - (MC_HEADER_SIZE + data_size);
	if (ext_table_size) {
		u32 ext_table_sum = 0;
		u32 *ext_tablep;

		if ((ext_table_size < EXT_HEADER_SIZE)
		 || ((ext_table_size - EXT_HEADER_SIZE) % EXT_SIGNATURE_SIZE)) {
			if (print_err)
				pr_err("Error: truncated extended signature table.\n");
			return -EINVAL;
		}

		ext_header = mc + MC_HEADER_SIZE + data_size;
		if (ext_table_size != exttable_size(ext_header)) {
			if (print_err)
				pr_err("Error: extended signature table size mismatch.\n");
			return -EFAULT;
		}

		ext_sigcount = ext_header->count;

		/*
		 * Check extended table checksum: the sum of all dwords that
		 * comprise a valid table must be 0.
		 */
		ext_tablep = (u32 *)ext_header;

		i = ext_table_size / sizeof(u32);
		while (i--)
			ext_table_sum += ext_tablep[i];

		if (ext_table_sum) {
			if (print_err)
				pr_warn("Bad extended signature table checksum, aborting.\n");
			return -EINVAL;
		}
	}

	/*
	 * Calculate the checksum of update data and header. The checksum of
	 * valid update data and header including the extended signature table
	 * must be 0.
	 */
	orig_sum = 0;
	i = (MC_HEADER_SIZE + data_size) / sizeof(u32);
	while (i--)
		orig_sum += ((u32 *)mc)[i];

	if (orig_sum) {
		if (print_err)
			pr_err("Bad microcode data checksum, aborting.\n");
		return -EINVAL;
	}

	if (!ext_table_size)
		return 0;

	/*
	 * Check extended signature checksum: 0 => valid.
	 */
	for (i = 0; i < ext_sigcount; i++) {
		ext_sig = (void *)ext_header + EXT_HEADER_SIZE +
			  EXT_SIGNATURE_SIZE * i;

		sum = (mc_header->sig + mc_header->pf + mc_header->cksum) -
		      (ext_sig->sig + ext_sig->pf + ext_sig->cksum);
		if (sum) {
			if (print_err)
				pr_err("Bad extended signature checksum, aborting.\n");
			return -EINVAL;
		}
	}
	return 0;
}
321 | ||
fe055896 BP |
/*
 * Get microcode matching with BSP's model. Only CPUs with the same model as
 * BSP can stay in the platform.
 *
 * Walks the concatenated patches in @data. With @save set, matching
 * patches are stored into the cache; otherwise the newest matching patch
 * is returned. Returns NULL if the container is malformed or no patch
 * was selected.
 */
static struct microcode_intel *
scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save)
{
	struct microcode_header_intel *mc_header;
	struct microcode_intel *patch = NULL;
	unsigned int mc_size;

	while (size) {
		if (size < sizeof(struct microcode_header_intel))
			break;

		mc_header = (struct microcode_header_intel *)data;

		mc_size = get_totalsize(mc_header);
		if (!mc_size ||
		    mc_size > size ||
		    microcode_sanity_check(data, 0) < 0)
			break;

		size -= mc_size;

		if (!microcode_matches(mc_header, uci->cpu_sig.sig)) {
			data += mc_size;
			continue;
		}

		/* Saving mode: cache the patch and move on. */
		if (save) {
			save_microcode_patch(data, mc_size);
			goto next;
		}


		if (!patch) {
			/* First candidate must beat the running revision. */
			if (!has_newer_microcode(data,
						 uci->cpu_sig.sig,
						 uci->cpu_sig.pf,
						 uci->cpu_sig.rev))
				goto next;

		} else {
			/* Later candidates must beat the current best. */
			struct microcode_header_intel *phdr = &patch->hdr;

			if (!has_newer_microcode(data,
						 phdr->sig,
						 phdr->pf,
						 phdr->rev))
				goto next;
		}

		/* We have a newer patch, save it. */
		patch = data;

next:
		data += mc_size;
	}

	/* Leftover bytes mean a truncated/garbled container: reject all. */
	if (size)
		return NULL;

	return patch;
}
387 | ||
/*
 * Fill in @uci with the current CPU's signature, processor flags and
 * microcode revision, using raw CPUID/MSR accessors so it is safe to call
 * before the kernel is fully up. Always returns 0.
 */
static int collect_cpu_info_early(struct ucode_cpu_info *uci)
{
	unsigned int val[2];
	unsigned int family, model;
	struct cpu_signature csig = { 0 };
	unsigned int eax, ebx, ecx, edx;

	memset(uci, 0, sizeof(*uci));

	/* CPUID leaf 1: the CPU signature is in EAX. */
	eax = 0x00000001;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	csig.sig = eax;

	family = x86_family(eax);
	model  = x86_model(eax);

	if ((model >= 5) || (family > 6)) {
		/* get processor flags from MSR 0x17 */
		native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
		csig.pf = 1 << ((val[1] >> 18) & 7);
	}

	csig.rev = intel_get_microcode_revision();

	uci->cpu_sig = csig;
	uci->valid = 1;

	return 0;
}
418 | ||
fe055896 BP |
/*
 * Debug helper: dump the current CPU signature and every patch in the
 * microcode cache (including extended signatures). Compiles to an empty
 * function unless DEBUG is defined.
 */
static void show_saved_mc(void)
{
#ifdef DEBUG
	int i = 0, j;
	unsigned int sig, pf, rev, total_size, data_size, date;
	struct ucode_cpu_info uci;
	struct ucode_patch *p;

	if (list_empty(&microcode_cache)) {
		pr_debug("no microcode data saved.\n");
		return;
	}

	collect_cpu_info_early(&uci);

	sig = uci.cpu_sig.sig;
	pf = uci.cpu_sig.pf;
	rev = uci.cpu_sig.rev;
	pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);

	list_for_each_entry(p, &microcode_cache, plist) {
		struct microcode_header_intel *mc_saved_header;
		struct extended_sigtable *ext_header;
		struct extended_signature *ext_sig;
		int ext_sigcount;

		mc_saved_header = (struct microcode_header_intel *)p->data;

		sig = mc_saved_header->sig;
		pf = mc_saved_header->pf;
		rev = mc_saved_header->rev;
		date = mc_saved_header->date;

		total_size = get_totalsize(mc_saved_header);
		data_size = get_datasize(mc_saved_header);

		pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
			 i++, sig, pf, rev, total_size,
			 date & 0xffff,
			 date >> 24,
			 (date >> 16) & 0xff);

		/* Look for ext. headers: */
		if (total_size <= data_size + MC_HEADER_SIZE)
			continue;

		ext_header = (void *)mc_saved_header + data_size + MC_HEADER_SIZE;
		ext_sigcount = ext_header->count;
		ext_sig = (void *)ext_header + EXT_HEADER_SIZE;

		for (j = 0; j < ext_sigcount; j++) {
			sig = ext_sig->sig;
			pf = ext_sig->pf;

			pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n",
				 j, sig, pf);

			ext_sig++;
		}
	}
#endif
}
fe055896 | 481 | |
fe055896 | 482 | /* |
06b8534c BP |
483 | * Save this microcode patch. It will be loaded early when a CPU is |
484 | * hot-added or resumes. | |
fe055896 | 485 | */ |
06b8534c | 486 | static void save_mc_for_early(u8 *mc, unsigned int size) |
fe055896 | 487 | { |
0c5fa827 | 488 | #ifdef CONFIG_HOTPLUG_CPU |
9f3cc2a0 | 489 | /* Synchronization during CPU hotplug. */ |
0c5fa827 BP |
490 | static DEFINE_MUTEX(x86_cpu_microcode_mutex); |
491 | ||
fe055896 BP |
492 | mutex_lock(&x86_cpu_microcode_mutex); |
493 | ||
06b8534c | 494 | save_microcode_patch(mc, size); |
fe055896 BP |
495 | show_saved_mc(); |
496 | ||
fe055896 | 497 | mutex_unlock(&x86_cpu_microcode_mutex); |
fe055896 | 498 | #endif |
0c5fa827 | 499 | } |
fe055896 | 500 | |
06b8534c | 501 | static bool load_builtin_intel_microcode(struct cpio_data *cp) |
fe055896 | 502 | { |
06b8534c | 503 | unsigned int eax = 1, ebx, ecx = 0, edx; |
fe055896 BP |
504 | char name[30]; |
505 | ||
06b8534c BP |
506 | if (IS_ENABLED(CONFIG_X86_32)) |
507 | return false; | |
508 | ||
fe055896 BP |
509 | native_cpuid(&eax, &ebx, &ecx, &edx); |
510 | ||
99f925ce BP |
511 | sprintf(name, "intel-ucode/%02x-%02x-%02x", |
512 | x86_family(eax), x86_model(eax), x86_stepping(eax)); | |
fe055896 BP |
513 | |
514 | return get_builtin_firmware(cp, name); | |
fe055896 BP |
515 | } |
516 | ||
fe055896 BP |
/*
 * Print ucode update info.
 *
 * @date is the packed date word from the patch header; the format string
 * prints its low 16 bits as the year, the top byte as the month and
 * bits 16-23 as the day.
 */
static void
print_ucode_info(struct ucode_cpu_info *uci, unsigned int date)
{
	pr_info_once("microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n",
		     uci->cpu_sig.rev,
		     date & 0xffff,
		     date >> 24,
		     (date >> 16) & 0xff);
}
529 | ||
#ifdef CONFIG_X86_32

/* Set via physical address from print_ucode() while printk is unusable. */
static int delay_ucode_info;
static int current_mc_date;

/*
 * Print early updated ucode info after printk works. This is delayed info dump.
 */
void show_ucode_info_early(void)
{
	struct ucode_cpu_info uci;

	if (delay_ucode_info) {
		collect_cpu_info_early(&uci);
		print_ucode_info(&uci, current_mc_date);
		delay_ucode_info = 0;
	}
}

/*
 * At this point, we can not call printk() yet. Delay printing microcode info in
 * show_ucode_info_early() until printk() works.
 */
static void print_ucode(struct ucode_cpu_info *uci)
{
	struct microcode_intel *mc;
	int *delay_ucode_info_p;
	int *current_mc_date_p;

	mc = uci->mc;
	if (!mc)
		return;

	/*
	 * Running before paging is enabled: the static flags must be
	 * written through their physical addresses.
	 */
	delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
	current_mc_date_p = (int *)__pa_nodebug(&current_mc_date);

	*delay_ucode_info_p = 1;
	*current_mc_date_p = mc->hdr.date;
}
#else

/* On 64-bit printk works this early, so print directly. */
static inline void print_ucode(struct ucode_cpu_info *uci)
{
	struct microcode_intel *mc;

	mc = uci->mc;
	if (!mc)
		return;

	print_ucode_info(uci, mc->hdr.date);
}
#endif
582 | ||
/*
 * Load @uci->mc on the current CPU via the microcode-update MSR and verify
 * the new revision took effect. Returns 0 on success (or when there is no
 * patch to apply), -1 if the CPU still reports the old revision.
 * @early selects the delayed (pre-printk) vs. direct info printout.
 */
static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
{
	struct microcode_intel *mc;
	u32 rev;

	mc = uci->mc;
	if (!mc)
		return 0;

	/* write microcode via MSR 0x79 */
	native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);

	rev = intel_get_microcode_revision();
	if (rev != mc->hdr.rev)
		return -1;

	uci->cpu_sig.rev = rev;

	if (early)
		print_ucode(uci);
	else
		print_ucode_info(uci, mc->hdr.date);

	return 0;
}
608 | ||
fe055896 BP |
/*
 * Scan the initrd/builtin container one last time before the initrd is
 * jettisoned and save matching patches into the permanent cache.
 * Always returns 0.
 */
int __init save_microcode_in_initrd_intel(void)
{
	struct ucode_cpu_info uci;
	struct cpio_data cp;

	/*
	 * initrd is going away, clear patch ptr. We will scan the microcode one
	 * last time before jettisoning and save a patch, if found. Then we will
	 * update that pointer too, with a stable patch address to use when
	 * resuming the cores.
	 */
	intel_ucode_patch = NULL;

	if (!load_builtin_intel_microcode(&cp))
		cp = find_microcode_in_initrd(ucode_path, false);

	if (!(cp.data && cp.size))
		return 0;

	collect_cpu_info_early(&uci);

	/* save=true: matching patches go into the cache. */
	scan_microcode(cp.data, cp.size, &uci, true);

	show_saved_mc();

	return 0;
}
6c545647 | 636 | |
06b8534c BP |
/*
 * Find the newest matching microcode patch for the current CPU, trying
 * builtin firmware first and then the initrd. Fills in @uci with the
 * CPU's signature. Returns the patch, or NULL if none was found.
 */
static struct microcode_intel *__load_ucode_intel(struct ucode_cpu_info *uci)
{
	static const char *path;
	struct cpio_data cp;
	bool use_pa;

	if (IS_ENABLED(CONFIG_X86_32)) {
		/* Pre-paging on 32-bit: reference the path physically. */
		path = (const char *)__pa_nodebug(ucode_path);
		use_pa = true;
	} else {
		path = ucode_path;
		use_pa = false;
	}

	/* try built-in microcode first */
	if (!load_builtin_intel_microcode(&cp))
		cp = find_microcode_in_initrd(path, use_pa);

	if (!(cp.data && cp.size))
		return NULL;

	collect_cpu_info_early(uci);

	return scan_microcode(cp.data, cp.size, uci, false);
}
665 | ||
06b8534c | 666 | void __init load_ucode_intel_bsp(void) |
fe055896 | 667 | { |
06b8534c | 668 | struct microcode_intel *patch; |
fe055896 | 669 | struct ucode_cpu_info uci; |
fe055896 | 670 | |
06b8534c BP |
671 | patch = __load_ucode_intel(&uci); |
672 | if (!patch) | |
fe055896 BP |
673 | return; |
674 | ||
06b8534c | 675 | uci.mc = patch; |
fe055896 BP |
676 | |
677 | apply_microcode_early(&uci, true); | |
678 | } | |
679 | ||
06b8534c | 680 | void load_ucode_intel_ap(void) |
fe055896 | 681 | { |
06b8534c BP |
682 | struct microcode_intel *patch, **iup; |
683 | struct ucode_cpu_info uci; | |
264285ac | 684 | |
06b8534c BP |
685 | if (IS_ENABLED(CONFIG_X86_32)) |
686 | iup = (struct microcode_intel **) __pa_nodebug(&intel_ucode_patch); | |
687 | else | |
688 | iup = &intel_ucode_patch; | |
689 | ||
690 | reget: | |
691 | if (!*iup) { | |
692 | patch = __load_ucode_intel(&uci); | |
693 | if (!patch) | |
694 | return; | |
6c545647 | 695 | |
06b8534c BP |
696 | *iup = patch; |
697 | } | |
698 | ||
699 | uci.mc = *iup; | |
700 | ||
701 | if (apply_microcode_early(&uci, true)) { | |
702 | /* Mixed-silicon system? Try to refetch the proper patch: */ | |
703 | *iup = NULL; | |
704 | ||
705 | goto reget; | |
706 | } | |
fe055896 BP |
707 | } |
708 | ||
06b8534c | 709 | static struct microcode_intel *find_patch(struct ucode_cpu_info *uci) |
fe055896 | 710 | { |
06b8534c BP |
711 | struct microcode_header_intel *phdr; |
712 | struct ucode_patch *iter, *tmp; | |
fe055896 | 713 | |
06b8534c | 714 | list_for_each_entry_safe(iter, tmp, µcode_cache, plist) { |
fe055896 | 715 | |
06b8534c | 716 | phdr = (struct microcode_header_intel *)iter->data; |
efaad554 | 717 | |
06b8534c BP |
718 | if (phdr->rev <= uci->cpu_sig.rev) |
719 | continue; | |
efaad554 | 720 | |
06b8534c BP |
721 | if (!find_matching_signature(phdr, |
722 | uci->cpu_sig.sig, | |
723 | uci->cpu_sig.pf)) | |
724 | continue; | |
fe055896 | 725 | |
06b8534c BP |
726 | return iter->data; |
727 | } | |
728 | return NULL; | |
fe055896 BP |
729 | } |
730 | ||
/* Reapply the best cached microcode patch on the current CPU. */
void reload_ucode_intel(void)
{
	struct microcode_intel *p;
	struct ucode_cpu_info uci;

	collect_cpu_info_early(&uci);

	p = find_patch(&uci);
	if (!p)
		return;

	uci.mc = p;

	apply_microcode_early(&uci, false);
}
746 | ||
d45de409 | 747 | static int collect_cpu_info(int cpu_num, struct cpu_signature *csig) |
1da177e4 | 748 | { |
354542d0 | 749 | static struct cpu_signature prev; |
92cb7612 | 750 | struct cpuinfo_x86 *c = &cpu_data(cpu_num); |
1da177e4 LT |
751 | unsigned int val[2]; |
752 | ||
d45de409 | 753 | memset(csig, 0, sizeof(*csig)); |
1da177e4 | 754 | |
d45de409 | 755 | csig->sig = cpuid_eax(0x00000001); |
9a3110bf SL |
756 | |
757 | if ((c->x86_model >= 5) || (c->x86 > 6)) { | |
758 | /* get processor flags from MSR 0x17 */ | |
759 | rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]); | |
d45de409 | 760 | csig->pf = 1 << ((val[1] >> 18) & 7); |
1da177e4 LT |
761 | } |
762 | ||
506ed6b5 | 763 | csig->rev = c->microcode; |
354542d0 AK |
764 | |
765 | /* No extra locking on prev, races are harmless. */ | |
766 | if (csig->sig != prev.sig || csig->pf != prev.pf || csig->rev != prev.rev) { | |
767 | pr_info("sig=0x%x, pf=0x%x, revision=0x%x\n", | |
768 | csig->sig, csig->pf, csig->rev); | |
769 | prev = *csig; | |
770 | } | |
d45de409 DA |
771 | |
772 | return 0; | |
1da177e4 LT |
773 | } |
774 | ||
532ed374 | 775 | static int apply_microcode_intel(int cpu) |
1da177e4 | 776 | { |
de778275 | 777 | struct microcode_intel *mc; |
4bae1967 | 778 | struct ucode_cpu_info *uci; |
26cbaa4d | 779 | struct cpuinfo_x86 *c; |
354542d0 | 780 | static int prev_rev; |
4167709b | 781 | u32 rev; |
4bae1967 | 782 | |
9a3110bf | 783 | /* We should bind the task to the CPU */ |
26cbaa4d | 784 | if (WARN_ON(raw_smp_processor_id() != cpu)) |
58b5f2cc | 785 | return -1; |
9a3110bf | 786 | |
58b5f2cc BP |
787 | uci = ucode_cpu_info + cpu; |
788 | mc = uci->mc; | |
06b8534c BP |
789 | if (!mc) { |
790 | /* Look for a newer patch in our cache: */ | |
791 | mc = find_patch(uci); | |
792 | if (!mc) | |
793 | return 0; | |
794 | } | |
9cd4d78e | 795 | |
1da177e4 | 796 | /* write microcode via MSR 0x79 */ |
c416e611 | 797 | wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits); |
245067d1 | 798 | |
4167709b | 799 | rev = intel_get_microcode_revision(); |
1da177e4 | 800 | |
4167709b | 801 | if (rev != mc->hdr.rev) { |
f58e1f53 | 802 | pr_err("CPU%d update to revision 0x%x failed\n", |
26cbaa4d | 803 | cpu, mc->hdr.rev); |
871b72dd | 804 | return -1; |
9a3110bf | 805 | } |
26cbaa4d | 806 | |
4167709b | 807 | if (rev != prev_rev) { |
354542d0 | 808 | pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n", |
4167709b | 809 | rev, |
354542d0 AK |
810 | mc->hdr.date & 0xffff, |
811 | mc->hdr.date >> 24, | |
812 | (mc->hdr.date >> 16) & 0xff); | |
4167709b | 813 | prev_rev = rev; |
354542d0 | 814 | } |
4bae1967 | 815 | |
26cbaa4d BP |
816 | c = &cpu_data(cpu); |
817 | ||
4167709b BP |
818 | uci->cpu_sig.rev = rev; |
819 | c->microcode = rev; | |
871b72dd DA |
820 | |
821 | return 0; | |
1da177e4 LT |
822 | } |
823 | ||
871b72dd DA |
/*
 * Parse a container of concatenated patches fetched via @get_ucode_data
 * (firmware loader or user copy), pick the newest one matching @cpu and
 * install it in uci->mc (also caching it for early loading).
 *
 * Returns UCODE_OK on success, UCODE_NFOUND when no matching patch is
 * newer than the running revision, UCODE_ERROR on a malformed container.
 */
static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
				int (*get_ucode_data)(void *, const void *, size_t))
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	u8 *ucode_ptr = data, *new_mc = NULL, *mc = NULL;
	int new_rev = uci->cpu_sig.rev;
	unsigned int leftover = size;
	unsigned int curr_mc_size = 0, new_mc_size = 0;
	unsigned int csig, cpf;

	while (leftover) {
		struct microcode_header_intel mc_header;
		unsigned int mc_size;

		if (leftover < sizeof(mc_header)) {
			pr_err("error! Truncated header in microcode data file\n");
			break;
		}

		/* Peek at the header to learn this patch's total size. */
		if (get_ucode_data(&mc_header, ucode_ptr, sizeof(mc_header)))
			break;

		mc_size = get_totalsize(&mc_header);
		if (!mc_size || mc_size > leftover) {
			pr_err("error! Bad data in microcode data file\n");
			break;
		}

		/* For performance reasons, reuse mc area when possible */
		if (!mc || mc_size > curr_mc_size) {
			vfree(mc);
			mc = vmalloc(mc_size);
			if (!mc)
				break;
			curr_mc_size = mc_size;
		}

		if (get_ucode_data(mc, ucode_ptr, mc_size) ||
		    microcode_sanity_check(mc, 1) < 0) {
			break;
		}

		csig = uci->cpu_sig.sig;
		cpf = uci->cpu_sig.pf;
		if (has_newer_microcode(mc, csig, cpf, new_rev)) {
			/* This patch supersedes the current best: keep it. */
			vfree(new_mc);
			new_rev = mc_header.rev;
			new_mc  = mc;
			new_mc_size = mc_size;
			mc = NULL;	/* trigger new vmalloc */
		}

		ucode_ptr += mc_size;
		leftover  -= mc_size;
	}

	vfree(mc);

	/* Leftover bytes mean the walk aborted mid-container. */
	if (leftover) {
		vfree(new_mc);
		return UCODE_ERROR;
	}

	if (!new_mc)
		return UCODE_NFOUND;

	/* Ownership of new_mc transfers to uci->mc; free the old patch. */
	vfree(uci->mc);
	uci->mc = (struct microcode_intel *)new_mc;

	/*
	 * If early loading microcode is supported, save this mc into
	 * permanent memory. So it will be loaded early when a CPU is hot added
	 * or resumes.
	 */
	save_mc_for_early(new_mc, new_mc_size);

	pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
		 cpu, new_rev, uci->cpu_sig.rev);

	return UCODE_OK;
}
905 | ||
a0a29b62 DA |
906 | static int get_ucode_fw(void *to, const void *from, size_t n) |
907 | { | |
908 | memcpy(to, from, n); | |
909 | return 0; | |
910 | } | |
a30a6a2c | 911 | |
723f2828 BP |
/*
 * Returns true when late loading must be refused on @cpu due to a known
 * erratum on Broadwell-X.
 */
static bool is_blacklisted(unsigned int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	/*
	 * Late loading on model 79 with microcode revision less than 0x0b000021
	 * and LLC size per core bigger than 2.5MB may result in a system hang.
	 * This behavior is documented in item BDF90, #334165 (Intel Xeon
	 * Processor E7-8800/4800 v4 Product Family).
	 */
	if (c->x86 == 6 &&
	    c->x86_model == INTEL_FAM6_BROADWELL_X &&
	    c->x86_stepping == 0x01 &&
	    llc_size_per_core > 2621440 &&
	    c->microcode < 0x0b000021) {
		pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
		pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
		return true;
	}

	return false;
}
934 | ||
48e30685 BP |
/*
 * Driver-core callback: request "intel-ucode/FF-MM-SS" for @cpu via the
 * firmware loader and feed it to generic_load_microcode().
 * (name[30] is sufficient here: the three fields are printed from
 * byte-ranged cpuinfo members, two hex digits each.)
 */
static enum ucode_state request_microcode_fw(int cpu, struct device *device,
					     bool refresh_fw)
{
	char name[30];
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	const struct firmware *firmware;
	enum ucode_state ret;

	if (is_blacklisted(cpu))
		return UCODE_NFOUND;

	sprintf(name, "intel-ucode/%02x-%02x-%02x",
		c->x86, c->x86_model, c->x86_stepping);

	if (request_firmware_direct(&firmware, name, device)) {
		pr_debug("data file %s load failed\n", name);
		return UCODE_NFOUND;
	}

	ret = generic_load_microcode(cpu, (void *)firmware->data,
				     firmware->size, &get_ucode_fw);

	release_firmware(firmware);

	return ret;
}
961 | ||
/*
 * Copy callback for user-supplied buffers: returns non-zero (the number
 * of uncopied bytes, per copy_from_user()) when the copy faults.
 */
static int get_ucode_user(void *to, const void *from, size_t n)
{
	return copy_from_user(to, from, n);
}
966 | ||
871b72dd DA |
/*
 * Driver-core callback: load microcode from a user-space buffer,
 * honouring the late-load blacklist.
 */
static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
	if (is_blacklisted(cpu))
		return UCODE_NFOUND;

	return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
}
975 | ||
4db646b1 | 976 | static struct microcode_ops microcode_intel_ops = { |
a0a29b62 DA |
977 | .request_microcode_user = request_microcode_user, |
978 | .request_microcode_fw = request_microcode_fw, | |
8d86f390 | 979 | .collect_cpu_info = collect_cpu_info, |
532ed374 | 980 | .apply_microcode = apply_microcode_intel, |
8d86f390 PO |
981 | }; |
982 | ||
7e702d17 JZ |
/*
 * Last-level cache size per physical core, in bytes; consumed by the
 * BDF90 erratum check in is_blacklisted().
 */
static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
{
	/* x86_cache_size is in KB. */
	u64 llc_size = c->x86_cache_size * 1024;

	do_div(llc_size, c->x86_max_cores);

	return (int)llc_size;
}
991 | ||
18dbc916 | 992 | struct microcode_ops * __init init_intel_microcode(void) |
8d86f390 | 993 | { |
9a2bc335 | 994 | struct cpuinfo_x86 *c = &boot_cpu_data; |
7164b3f5 SB |
995 | |
996 | if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 || | |
997 | cpu_has(c, X86_FEATURE_IA64)) { | |
998 | pr_err("Intel CPU family 0x%x not supported\n", c->x86); | |
999 | return NULL; | |
1000 | } | |
1001 | ||
7e702d17 JZ |
1002 | llc_size_per_core = calc_llc_size_per_core(c); |
1003 | ||
18dbc916 | 1004 | return µcode_intel_ops; |
8d86f390 | 1005 | } |