Commit | Line | Data |
---|---|---|
80cc9f10 PO |
1 | /* |
2 | * AMD CPU Microcode Update Driver for Linux | |
fe055896 BP |
3 | * |
4 | * This driver allows to upgrade microcode on F10h AMD | |
5 | * CPUs and later. | |
6 | * | |
597e11a3 | 7 | * Copyright (C) 2008-2011 Advanced Micro Devices Inc. |
80cc9f10 PO |
8 | * |
9 | * Author: Peter Oruba <peter.oruba@amd.com> | |
10 | * | |
11 | * Based on work by: | |
12 | * Tigran Aivazian <tigran@aivazian.fsnet.co.uk> | |
13 | * | |
fe055896 BP |
14 | * early loader: |
15 | * Copyright (C) 2013 Advanced Micro Devices, Inc. | |
16 | * | |
17 | * Author: Jacob Shin <jacob.shin@amd.com> | |
18 | * Fixes: Borislav Petkov <bp@suse.de> | |
80cc9f10 | 19 | * |
2a3282a7 | 20 | * Licensed under the terms of the GNU General Public |
80cc9f10 | 21 | * License version 2. See file COPYING for details. |
4bae1967 | 22 | */ |
6b26e1bf | 23 | #define pr_fmt(fmt) "microcode: " fmt |
f58e1f53 | 24 | |
fe055896 | 25 | #include <linux/earlycpio.h> |
4bae1967 | 26 | #include <linux/firmware.h> |
4bae1967 IM |
27 | #include <linux/uaccess.h> |
28 | #include <linux/vmalloc.h> | |
fe055896 | 29 | #include <linux/initrd.h> |
4bae1967 | 30 | #include <linux/kernel.h> |
80cc9f10 | 31 | #include <linux/pci.h> |
80cc9f10 | 32 | |
fe055896 | 33 | #include <asm/microcode_amd.h> |
80cc9f10 | 34 | #include <asm/microcode.h> |
4bae1967 | 35 | #include <asm/processor.h> |
fe055896 BP |
36 | #include <asm/setup.h> |
37 | #include <asm/cpu.h> | |
4bae1967 | 38 | #include <asm/msr.h> |
80cc9f10 | 39 | |
/* Equivalence table mapping CPUID signatures to equivalence IDs; allocated
 * by install_equiv_cpu_table() and freed by free_equiv_cpu_table(). */
static struct equiv_cpu_entry *equiv_cpu_table;

/* One cached microcode patch, kept on the pcache list below. */
struct ucode_patch {
	struct list_head plist;		/* link on pcache */
	void *data;			/* raw patch payload (kmemdup'd) */
	u32 patch_id;			/* patch revision */
	u16 equiv_cpu;			/* equivalence ID this patch applies to */
};

/* Per-family cache of ucode patches, maintained by update_cache()/free_cache(). */
static LIST_HEAD(pcache);

/*
 * This points to the current valid container of microcode patches which we will
 * save from the initrd before jettisoning its contents.
 */
static u8 *container;
static size_t container_size;

/* Patch level we updated to during early load, if any (0 otherwise). */
static u32 ucode_new_rev;
/* Copy of the BSP's matching patch, used on 32-bit APs and on resume. */
u8 amd_ucode_patch[PATCH_MAX_SIZE];
/* Equivalence ID the BSP matched during early load. */
static u16 this_equiv_id;

/* Location/size of the microcode blob found in the initrd (or builtin). */
static struct cpio_data ucode_cpio;
63 | ||
fe055896 BP |
64 | static struct cpio_data __init find_ucode_in_initrd(void) |
65 | { | |
6c545647 | 66 | #ifdef CONFIG_BLK_DEV_INITRD |
fe055896 BP |
67 | long offset = 0; |
68 | char *path; | |
69 | void *start; | |
70 | size_t size; | |
71 | ||
6c545647 BP |
72 | /* |
73 | * Microcode patch container file is prepended to the initrd in cpio | |
74 | * format. See Documentation/x86/early-microcode.txt | |
75 | */ | |
76 | static __initdata char ucode_path[] = "kernel/x86/microcode/AuthenticAMD.bin"; | |
77 | ||
fe055896 BP |
78 | #ifdef CONFIG_X86_32 |
79 | struct boot_params *p; | |
80 | ||
81 | /* | |
82 | * On 32-bit, early load occurs before paging is turned on so we need | |
83 | * to use physical addresses. | |
84 | */ | |
85 | p = (struct boot_params *)__pa_nodebug(&boot_params); | |
86 | path = (char *)__pa_nodebug(ucode_path); | |
87 | start = (void *)p->hdr.ramdisk_image; | |
88 | size = p->hdr.ramdisk_size; | |
89 | #else | |
90 | path = ucode_path; | |
91 | start = (void *)(boot_params.hdr.ramdisk_image + PAGE_OFFSET); | |
92 | size = boot_params.hdr.ramdisk_size; | |
6c545647 | 93 | #endif /* !CONFIG_X86_32 */ |
fe055896 BP |
94 | |
95 | return find_cpio_data(path, start, size, &offset); | |
6c545647 BP |
96 | #else |
97 | return (struct cpio_data){ NULL, 0, "" }; | |
98 | #endif | |
fe055896 BP |
99 | } |
100 | ||
101 | static size_t compute_container_size(u8 *data, u32 total_size) | |
102 | { | |
103 | size_t size = 0; | |
104 | u32 *header = (u32 *)data; | |
105 | ||
106 | if (header[0] != UCODE_MAGIC || | |
107 | header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */ | |
108 | header[2] == 0) /* size */ | |
109 | return size; | |
110 | ||
111 | size = header[2] + CONTAINER_HDR_SZ; | |
112 | total_size -= size; | |
113 | data += size; | |
114 | ||
115 | while (total_size) { | |
116 | u16 patch_size; | |
117 | ||
118 | header = (u32 *)data; | |
119 | ||
120 | if (header[0] != UCODE_UCODE_TYPE) | |
121 | break; | |
122 | ||
123 | /* | |
124 | * Sanity-check patch size. | |
125 | */ | |
126 | patch_size = header[1]; | |
127 | if (patch_size > PATCH_MAX_SIZE) | |
128 | break; | |
129 | ||
130 | size += patch_size + SECTION_HDR_SIZE; | |
131 | data += patch_size + SECTION_HDR_SIZE; | |
132 | total_size -= patch_size + SECTION_HDR_SIZE; | |
133 | } | |
134 | ||
135 | return size; | |
136 | } | |
137 | ||
/*
 * Early load occurs before we can vmalloc(). So we look for the microcode
 * patch container file in initrd, traverse equivalent cpu table, look for a
 * matching microcode patch, and update, all in initrd memory in place.
 * When vmalloc() is available for use later -- on 64-bit during first AP load,
 * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
 * load_microcode_amd() to save equivalent cpu table and microcode patches in
 * kernel heap memory.
 *
 * @ucode:      start of the (possibly concatenated) container blob(s)
 * @size:       total blob size in bytes
 * @save_patch: when true, stash the applied patch in amd_ucode_patch
 */
static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
{
	struct equiv_cpu_entry *eq;
	size_t *cont_sz;
	u32 *header;
	u8  *data, **cont;
	u8 (*patch)[PATCH_MAX_SIZE];
	u16 eq_id = 0;
	int offset, left;
	u32 rev, eax, ebx, ecx, edx;
	u32 *new_rev;

#ifdef CONFIG_X86_32
	/* Paging is off on 32-bit early load: touch globals via physaddrs. */
	new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
	cont_sz = (size_t *)__pa_nodebug(&container_size);
	cont	= (u8 **)__pa_nodebug(&container);
	patch	= (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
#else
	new_rev = &ucode_new_rev;
	cont_sz = &container_size;
	cont	= &container;
	patch	= &amd_ucode_patch;
#endif

	data   = ucode;
	left   = size;
	header = (u32 *)data;

	/* find equiv cpu table */
	if (header[0] != UCODE_MAGIC ||
	    header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
	    header[2] == 0)			       /* size */
		return;

	/* Get this CPU's signature via CPUID(1). */
	eax = 0x00000001;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);

	while (left > 0) {
		eq = (struct equiv_cpu_entry *)(data + CONTAINER_HDR_SZ);

		*cont = data;

		/* Advance past the container header */
		offset = header[2] + CONTAINER_HDR_SZ;
		data  += offset;
		left  -= offset;

		eq_id = find_equiv_id(eq, eax);
		if (eq_id) {
			this_equiv_id = eq_id;
			*cont_sz = compute_container_size(*cont, left + offset);

			/*
			 * truncate how much we need to iterate over in the
			 * ucode update loop below
			 */
			left = *cont_sz - offset;
			break;
		}

		/*
		 * support multiple container files appended together. if this
		 * one does not have a matching equivalent cpu entry, we fast
		 * forward to the next container file.
		 */
		while (left > 0) {
			header = (u32 *)data;
			if (header[0] == UCODE_MAGIC &&
			    header[1] == UCODE_EQUIV_CPU_TABLE_TYPE)
				break;

			offset = header[1] + SECTION_HDR_SIZE;
			data  += offset;
			left  -= offset;
		}

		/* mark where the next microcode container file starts */
		offset = data - (u8 *)ucode;
		ucode  = data;
	}

	if (!eq_id) {
		/* No container matched this CPU - invalidate the stash. */
		*cont = NULL;
		*cont_sz = 0;
		return;
	}

	/* early==true: we may be running before paging is enabled. */
	if (check_current_patch_level(&rev, true))
		return;

	/* Walk the matched container's patch sections and apply the best fit. */
	while (left > 0) {
		struct microcode_amd *mc;

		header = (u32 *)data;
		if (header[0] != UCODE_UCODE_TYPE || /* type */
		    header[1] == 0)		     /* size */
			break;

		mc = (struct microcode_amd *)(data + SECTION_HDR_SIZE);

		if (eq_id == mc->hdr.processor_rev_id && rev < mc->hdr.patch_id) {

			if (!__apply_microcode_amd(mc)) {
				rev = mc->hdr.patch_id;
				*new_rev = rev;

				if (save_patch)
					memcpy(patch, mc,
					       min_t(u32, header[1], PATCH_MAX_SIZE));
			}
		}

		offset = header[1] + SECTION_HDR_SIZE;
		data  += offset;
		left  -= offset;
	}
}
265 | ||
/*
 * Try to fetch microcode linked into the kernel image (CONFIG_FW_LOADER
 * builtin firmware).  Only supported on 64-bit.  Returns true and fills
 * @cp on success.
 */
static bool __init load_builtin_amd_microcode(struct cpio_data *cp,
					      unsigned int family)
{
#ifdef CONFIG_X86_64
	char fw_name[36] = "amd-ucode/microcode_amd.bin";

	/* Family 15h and newer use family-specific firmware file names. */
	if (family >= 0x15)
		snprintf(fw_name, sizeof(fw_name),
			 "amd-ucode/microcode_amd_fam%.2xh.bin", family);

	return get_builtin_firmware(cp, fw_name);
#else
	return false;
#endif
}
281 | ||
/*
 * Early-load entry point for the boot CPU: find the microcode blob
 * (builtin firmware first, then the initrd), remember its location for
 * the APs and apply a matching patch in place.
 */
void __init load_ucode_amd_bsp(unsigned int family)
{
	struct cpio_data cp;
	void **data;
	size_t *size;

#ifdef CONFIG_X86_32
	/* Paging is still off: write the globals through physical addresses. */
	data = (void **)__pa_nodebug(&ucode_cpio.data);
	size = (size_t *)__pa_nodebug(&ucode_cpio.size);
#else
	data = &ucode_cpio.data;
	size = &ucode_cpio.size;
#endif

	/* Builtin firmware takes precedence over the initrd copy. */
	if (!load_builtin_amd_microcode(&cp, family))
		cp = find_ucode_in_initrd();

	if (!(cp.data && cp.size))
		return;

	/* Stash blob location/size so the APs can reuse it later. */
	*data = cp.data;
	*size = cp.size;

	/* save_patch=true: keep a copy of the applied patch for the APs. */
	apply_ucode_in_initrd(cp.data, cp.size, true);
}
307 | ||
308 | #ifdef CONFIG_X86_32 | |
/*
 * On 32-bit, since AP's early load occurs before paging is turned on, we
 * cannot traverse cpu_equiv_table and pcache in kernel heap memory. So during
 * cold boot, AP will apply_ucode_in_initrd() just like the BSP. During
 * save_microcode_in_initrd_amd() BSP's patch is copied to amd_ucode_patch,
 * which is used upon resume from suspend.
 */
void load_ucode_amd_ap(void)
{
	struct microcode_amd *mc;
	size_t *usize;
	void **ucode;

	/* Fast path: the BSP already stashed a valid patch - just apply it. */
	mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
	if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
		__apply_microcode_amd(mc);
		return;
	}

	/* Otherwise rescan the container the BSP found (physaddr access). */
	ucode = (void *)__pa_nodebug(&container);
	usize = (size_t *)__pa_nodebug(&container_size);

	if (!*ucode || !*usize)
		return;

	apply_ucode_in_initrd(*ucode, *usize, false);
}
336 | ||
337 | static void __init collect_cpu_sig_on_bsp(void *arg) | |
338 | { | |
339 | unsigned int cpu = smp_processor_id(); | |
340 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | |
341 | ||
342 | uci->cpu_sig.sig = cpuid_eax(0x00000001); | |
343 | } | |
344 | ||
345 | static void __init get_bsp_sig(void) | |
346 | { | |
347 | unsigned int bsp = boot_cpu_data.cpu_index; | |
348 | struct ucode_cpu_info *uci = ucode_cpu_info + bsp; | |
349 | ||
350 | if (!uci->cpu_sig.sig) | |
351 | smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1); | |
352 | } | |
353 | #else | |
/*
 * 64-bit AP early load: paging is on, so we can walk the container the
 * BSP located directly.  Fast path reuses the BSP's saved patch when the
 * AP shares the BSP's equivalence ID; mixed-stepping systems rescan the
 * whole blob.
 */
void load_ucode_amd_ap(void)
{
	unsigned int cpu = smp_processor_id();
	struct equiv_cpu_entry *eq;
	struct microcode_amd *mc;
	u32 rev, eax;
	u16 eq_id;

	/* Exit if called on the BSP. */
	if (!cpu)
		return;

	/* Nothing found during BSP load - nothing to do. */
	if (!container)
		return;

	/*
	 * 64-bit runs with paging enabled, thus early==false.
	 */
	if (check_current_patch_level(&rev, false))
		return;

	eax = cpuid_eax(0x00000001);
	eq = (struct equiv_cpu_entry *)(container + CONTAINER_HDR_SZ);

	eq_id = find_equiv_id(eq, eax);
	if (!eq_id)
		return;

	if (eq_id == this_equiv_id) {
		/* Same stepping as the BSP: reuse its saved patch. */
		mc = (struct microcode_amd *)amd_ucode_patch;

		if (mc && rev < mc->hdr.patch_id) {
			if (!__apply_microcode_amd(mc))
				ucode_new_rev = mc->hdr.patch_id;
		}

	} else {
		if (!ucode_cpio.data)
			return;

		/*
		 * AP has a different equivalence ID than BSP, looks like
		 * mixed-steppings silicon so go through the ucode blob anew.
		 */
		apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size, false);
	}
}
401 | #endif | |
402 | ||
/*
 * Called before the initrd is jettisoned: copy the equivalence table and
 * patches out of initrd memory into the kernel-heap patch cache via
 * load_microcode_amd().  Returns 0 on success, -EINVAL otherwise.
 */
int __init save_microcode_in_initrd_amd(void)
{
	unsigned long cont;
	int retval = 0;
	enum ucode_state ret;
	u8 *cont_va;
	u32 eax;

	if (!container)
		return -EINVAL;

#ifdef CONFIG_X86_32
	get_bsp_sig();
	cont = (unsigned long)container;
	cont_va = __va(container);
#else
	/*
	 * We need the physical address of the container for both bitness since
	 * boot_params.hdr.ramdisk_image is a physical address.
	 */
	cont = __pa(container);
	cont_va = container;
#endif

	/*
	 * Take into account the fact that the ramdisk might get relocated and
	 * therefore we need to recompute the container's position in virtual
	 * memory space.
	 */
	if (relocated_ramdisk)
		container = (u8 *)(__va(relocated_ramdisk) +
			     (cont - boot_params.hdr.ramdisk_image));
	else
		container = cont_va;

	/* Derive the CPU family from CPUID(1).EAX (base + extended fields). */
	eax   = cpuid_eax(0x00000001);
	eax   = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);

	ret = load_microcode_amd(smp_processor_id(), eax, container, container_size);
	if (ret != UCODE_OK)
		retval = -EINVAL;

	/*
	 * This will be freed any msec now, stash patches for the current
	 * family and switch to patch cache for cpu hotplug, etc later.
	 */
	container = NULL;
	container_size = 0;

	return retval;
}
454 | ||
455 | void reload_ucode_amd(void) | |
456 | { | |
457 | struct microcode_amd *mc; | |
458 | u32 rev; | |
459 | ||
460 | /* | |
461 | * early==false because this is a syscore ->resume path and by | |
462 | * that time paging is long enabled. | |
463 | */ | |
464 | if (check_current_patch_level(&rev, false)) | |
465 | return; | |
466 | ||
467 | mc = (struct microcode_amd *)amd_ucode_patch; | |
468 | ||
469 | if (mc && rev < mc->hdr.patch_id) { | |
470 | if (!__apply_microcode_amd(mc)) { | |
471 | ucode_new_rev = mc->hdr.patch_id; | |
a58017c6 | 472 | pr_info("reload patch_level=0x%08x\n", ucode_new_rev); |
fe055896 BP |
473 | } |
474 | } | |
475 | } | |
a76096a6 | 476 | static u16 __find_equiv_id(unsigned int cpu) |
c96d2c09 BP |
477 | { |
478 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | |
a76096a6 | 479 | return find_equiv_id(equiv_cpu_table, uci->cpu_sig.sig); |
c96d2c09 BP |
480 | } |
481 | ||
482 | static u32 find_cpu_family_by_equiv_cpu(u16 equiv_cpu) | |
483 | { | |
484 | int i = 0; | |
485 | ||
486 | BUG_ON(!equiv_cpu_table); | |
487 | ||
488 | while (equiv_cpu_table[i].equiv_cpu != 0) { | |
489 | if (equiv_cpu == equiv_cpu_table[i].equiv_cpu) | |
490 | return equiv_cpu_table[i].installed_cpu; | |
491 | i++; | |
492 | } | |
493 | return 0; | |
494 | } | |
495 | ||
a3eb3b4d BP |
496 | /* |
497 | * a small, trivial cache of per-family ucode patches | |
498 | */ | |
499 | static struct ucode_patch *cache_find_patch(u16 equiv_cpu) | |
500 | { | |
501 | struct ucode_patch *p; | |
502 | ||
503 | list_for_each_entry(p, &pcache, plist) | |
504 | if (p->equiv_cpu == equiv_cpu) | |
505 | return p; | |
506 | return NULL; | |
507 | } | |
508 | ||
509 | static void update_cache(struct ucode_patch *new_patch) | |
510 | { | |
511 | struct ucode_patch *p; | |
512 | ||
513 | list_for_each_entry(p, &pcache, plist) { | |
514 | if (p->equiv_cpu == new_patch->equiv_cpu) { | |
515 | if (p->patch_id >= new_patch->patch_id) | |
516 | /* we already have the latest patch */ | |
517 | return; | |
518 | ||
519 | list_replace(&p->plist, &new_patch->plist); | |
520 | kfree(p->data); | |
521 | kfree(p); | |
522 | return; | |
523 | } | |
524 | } | |
525 | /* no patch found, add it */ | |
526 | list_add_tail(&new_patch->plist, &pcache); | |
527 | } | |
528 | ||
529 | static void free_cache(void) | |
530 | { | |
2d297480 | 531 | struct ucode_patch *p, *tmp; |
a3eb3b4d | 532 | |
2d297480 | 533 | list_for_each_entry_safe(p, tmp, &pcache, plist) { |
a3eb3b4d BP |
534 | __list_del(p->plist.prev, p->plist.next); |
535 | kfree(p->data); | |
536 | kfree(p); | |
537 | } | |
538 | } | |
539 | ||
540 | static struct ucode_patch *find_patch(unsigned int cpu) | |
541 | { | |
542 | u16 equiv_id; | |
543 | ||
a76096a6 | 544 | equiv_id = __find_equiv_id(cpu); |
a3eb3b4d BP |
545 | if (!equiv_id) |
546 | return NULL; | |
547 | ||
548 | return cache_find_patch(equiv_id); | |
549 | } | |
550 | ||
/*
 * ->collect_cpu_info driver hook: fill @csig with @cpu's CPUID signature
 * and current microcode revision.  Always returns 0.
 */
static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct ucode_patch *p;

	csig->sig = cpuid_eax(0x00000001);
	csig->rev = c->microcode;

	/*
	 * a patch could have been loaded early, set uci->mc so that
	 * mc_bp_resume() can call apply_microcode()
	 */
	p = find_patch(cpu);
	if (p && (p->patch_id == csig->rev))
		uci->mc = p->data;

	pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);

	return 0;
}
572 | ||
/*
 * Validate @patch_size against both the remaining buffer (@size) and the
 * per-family maximum patch block size.  Returns @patch_size when valid,
 * 0 otherwise.
 */
static unsigned int verify_patch_size(u8 family, u32 patch_size,
				      unsigned int size)
{
	u32 max_size;

/* Per-family maximum microcode patch block sizes, in bytes. */
#define F1XH_MPB_MAX_SIZE 2048
#define F14H_MPB_MAX_SIZE 1824
#define F15H_MPB_MAX_SIZE 4096
#define F16H_MPB_MAX_SIZE 3458

	switch (family) {
	case 0x14:
		max_size = F14H_MPB_MAX_SIZE;
		break;
	case 0x15:
		max_size = F15H_MPB_MAX_SIZE;
		break;
	case 0x16:
		max_size = F16H_MPB_MAX_SIZE;
		break;
	default:
		max_size = F1XH_MPB_MAX_SIZE;
		break;
	}

	/* Reject patches larger than either the buffer or the family max. */
	if (patch_size > min_t(u32, size, max_size)) {
		pr_err("patch size mismatch\n");
		return 0;
	}

	return patch_size;
}
605 | ||
/*
 * Those patch levels cannot be updated to newer ones and thus should be final.
 */
static u32 final_levels[] = {
	0x01000098,
	0x0100009f,
	0x010000af,
	0, /* T-101 terminator */
};

/*
 * Check the current patch level on this CPU.
 *
 * @rev: Use it to return the patch level. It is set to 0 in the case of
 * error.
 *
 * Returns:
 *  - true: if update should stop
 *  - false: otherwise
 */
bool check_current_patch_level(u32 *rev, bool early)
{
	u32 lvl, dummy, i;
	bool ret = false;
	u32 *levels;

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);

	/* 32-bit early load runs with paging off: use the physical address. */
	if (IS_ENABLED(CONFIG_X86_32) && early)
		levels = (u32 *)__pa_nodebug(&final_levels);
	else
		levels = final_levels;

	/* A final patch level must not be updated further. */
	for (i = 0; levels[i]; i++) {
		if (lvl == levels[i]) {
			lvl = 0;
			ret = true;
			break;
		}
	}

	if (rev)
		*rev = lvl;

	return ret;
}
652 | ||
a76096a6 JS |
653 | int __apply_microcode_amd(struct microcode_amd *mc_amd) |
654 | { | |
655 | u32 rev, dummy; | |
656 | ||
5335ba5c | 657 | native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc_amd->hdr.data_code); |
a76096a6 JS |
658 | |
659 | /* verify patch application was successful */ | |
5335ba5c | 660 | native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy); |
a76096a6 JS |
661 | if (rev != mc_amd->hdr.patch_id) |
662 | return -1; | |
663 | ||
664 | return 0; | |
665 | } | |
666 | ||
/*
 * ->apply_microcode driver hook: apply the cached patch for @cpu, if it
 * is newer than the currently running revision.  Must run on @cpu.
 * Returns 0 on success or no-op, -1 on failure or a final patch level.
 */
int apply_microcode_amd(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct microcode_amd *mc_amd;
	struct ucode_cpu_info *uci;
	struct ucode_patch *p;
	u32 rev;

	BUG_ON(raw_smp_processor_id() != cpu);

	uci = ucode_cpu_info + cpu;

	p = find_patch(cpu);
	if (!p)
		return 0;

	mc_amd  = p->data;
	uci->mc = p->data;

	/* early==false: paging is on by the time this hook runs. */
	if (check_current_patch_level(&rev, false))
		return -1;

	/* need to apply patch? */
	if (rev >= mc_amd->hdr.patch_id) {
		c->microcode = rev;
		uci->cpu_sig.rev = rev;
		return 0;
	}

	if (__apply_microcode_amd(mc_amd)) {
		pr_err("CPU%d: update failed for patch_level=0x%08x\n",
			cpu, mc_amd->hdr.patch_id);
		return -1;
	}
	pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
		mc_amd->hdr.patch_id);

	uci->cpu_sig.rev = mc_amd->hdr.patch_id;
	c->microcode = mc_amd->hdr.patch_id;

	return 0;
}
709 | ||
0657d9eb | 710 | static int install_equiv_cpu_table(const u8 *buf) |
80cc9f10 | 711 | { |
10de52d6 BP |
712 | unsigned int *ibuf = (unsigned int *)buf; |
713 | unsigned int type = ibuf[1]; | |
714 | unsigned int size = ibuf[2]; | |
80cc9f10 | 715 | |
10de52d6 | 716 | if (type != UCODE_EQUIV_CPU_TABLE_TYPE || !size) { |
258721ef BP |
717 | pr_err("empty section/" |
718 | "invalid type field in container file section header\n"); | |
10de52d6 | 719 | return -EINVAL; |
80cc9f10 PO |
720 | } |
721 | ||
8e5e9521 | 722 | equiv_cpu_table = vmalloc(size); |
80cc9f10 | 723 | if (!equiv_cpu_table) { |
f58e1f53 | 724 | pr_err("failed to allocate equivalent CPU table\n"); |
10de52d6 | 725 | return -ENOMEM; |
80cc9f10 PO |
726 | } |
727 | ||
e7e632f5 | 728 | memcpy(equiv_cpu_table, buf + CONTAINER_HDR_SZ, size); |
80cc9f10 | 729 | |
40b7f3df BP |
730 | /* add header length */ |
731 | return size + CONTAINER_HDR_SZ; | |
80cc9f10 PO |
732 | } |
733 | ||
/* Free the equivalence table; vfree(NULL) is a no-op, so safe to repeat. */
static void free_equiv_cpu_table(void)
{
	vfree(equiv_cpu_table);
	equiv_cpu_table = NULL;
}
80cc9f10 | 739 | |
/* Tear down all loader state: equivalence table and patch cache. */
static void cleanup(void)
{
	free_equiv_cpu_table();
	free_cache();
}
745 | ||
/*
 * We return the current size even if some of the checks failed so that
 * we can skip over the next patch. If we return a negative value, we
 * signal a grave error like a memory allocation has failed and the
 * driver cannot continue functioning normally. In such cases, we tear
 * down everything we've used up so far and exit.
 */
static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover)
{
	struct microcode_header_amd *mc_hdr;
	struct ucode_patch *patch;
	unsigned int patch_size, crnt_size, ret;
	u32 proc_fam;
	u16 proc_id;

	/* Section layout: u32 type, u32 size, then the patch payload. */
	patch_size = *(u32 *)(fw + 4);
	crnt_size  = patch_size + SECTION_HDR_SIZE;
	mc_hdr	   = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE);
	proc_id	   = mc_hdr->processor_rev_id;

	proc_fam = find_cpu_family_by_equiv_cpu(proc_id);
	if (!proc_fam) {
		pr_err("No patch family for equiv ID: 0x%04x\n", proc_id);
		return crnt_size;
	}

	/* check if patch is for the current family */
	proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff);
	if (proc_fam != family)
		return crnt_size;

	/* Chipset-specific patches are not handled by this driver. */
	if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
		pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n",
			mc_hdr->patch_id);
		return crnt_size;
	}

	ret = verify_patch_size(family, patch_size, leftover);
	if (!ret) {
		pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id);
		return crnt_size;
	}

	patch = kzalloc(sizeof(*patch), GFP_KERNEL);
	if (!patch) {
		pr_err("Patch allocation failure.\n");
		return -EINVAL;
	}

	patch->data = kmemdup(fw + SECTION_HDR_SIZE, patch_size, GFP_KERNEL);
	if (!patch->data) {
		pr_err("Patch data allocation failure.\n");
		kfree(patch);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&patch->plist);
	patch->patch_id  = mc_hdr->patch_id;
	patch->equiv_cpu = proc_id;

	pr_debug("%s: Added patch_id: 0x%08x, proc_id: 0x%04x\n",
		 __func__, patch->patch_id, proc_id);

	/* ... and add to cache. */
	update_cache(patch);

	return crnt_size;
}
814 | ||
84516098 TK |
815 | static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, |
816 | size_t size) | |
2efb05e8 BP |
817 | { |
818 | enum ucode_state ret = UCODE_ERROR; | |
819 | unsigned int leftover; | |
820 | u8 *fw = (u8 *)data; | |
821 | int crnt_size = 0; | |
1396fa9c | 822 | int offset; |
80cc9f10 | 823 | |
2efb05e8 | 824 | offset = install_equiv_cpu_table(data); |
10de52d6 | 825 | if (offset < 0) { |
f58e1f53 | 826 | pr_err("failed to create equivalent cpu table\n"); |
2efb05e8 | 827 | return ret; |
80cc9f10 | 828 | } |
2efb05e8 | 829 | fw += offset; |
a0a29b62 DA |
830 | leftover = size - offset; |
831 | ||
2efb05e8 | 832 | if (*(u32 *)fw != UCODE_UCODE_TYPE) { |
be62adb4 | 833 | pr_err("invalid type field in container file section header\n"); |
2efb05e8 BP |
834 | free_equiv_cpu_table(); |
835 | return ret; | |
be62adb4 | 836 | } |
a0a29b62 | 837 | |
be62adb4 | 838 | while (leftover) { |
84516098 | 839 | crnt_size = verify_and_add_patch(family, fw, leftover); |
2efb05e8 BP |
840 | if (crnt_size < 0) |
841 | return ret; | |
d733689a | 842 | |
2efb05e8 BP |
843 | fw += crnt_size; |
844 | leftover -= crnt_size; | |
80cc9f10 | 845 | } |
a0a29b62 | 846 | |
2efb05e8 | 847 | return UCODE_OK; |
a0a29b62 DA |
848 | } |
849 | ||
/*
 * (Re)load a container blob into the patch cache for @family, replacing
 * any previously installed equivalence table.  On 32-bit, additionally
 * snapshot the BSP's matching patch for early/resume use.
 */
enum ucode_state load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size)
{
	enum ucode_state ret;

	/* free old equiv table */
	free_equiv_cpu_table();

	ret = __load_microcode_amd(family, data, size);

	if (ret != UCODE_OK)
		cleanup();

#ifdef CONFIG_X86_32
	/* save BSP's matching patch for early load */
	if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) {
		struct ucode_patch *p = find_patch(cpu);
		if (p) {
			/* Zero first so a short patch leaves no stale bytes. */
			memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
			memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
							       PATCH_MAX_SIZE));
		}
	}
#endif
	return ret;
}
875 | ||
5b68edc9 AH |
876 | /* |
877 | * AMD microcode firmware naming convention, up to family 15h they are in | |
878 | * the legacy file: | |
879 | * | |
880 | * amd-ucode/microcode_amd.bin | |
881 | * | |
882 | * This legacy file is always smaller than 2K in size. | |
883 | * | |
2efb05e8 | 884 | * Beginning with family 15h, they are in family-specific firmware files: |
5b68edc9 AH |
885 | * |
886 | * amd-ucode/microcode_amd_fam15h.bin | |
887 | * amd-ucode/microcode_amd_fam16h.bin | |
888 | * ... | |
889 | * | |
890 | * These might be larger than 2K. | |
891 | */ | |
48e30685 BP |
892 | static enum ucode_state request_microcode_amd(int cpu, struct device *device, |
893 | bool refresh_fw) | |
a0a29b62 | 894 | { |
5b68edc9 | 895 | char fw_name[36] = "amd-ucode/microcode_amd.bin"; |
5b68edc9 | 896 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
2efb05e8 BP |
897 | enum ucode_state ret = UCODE_NFOUND; |
898 | const struct firmware *fw; | |
899 | ||
900 | /* reload ucode container only on the boot cpu */ | |
901 | if (!refresh_fw || c->cpu_index != boot_cpu_data.cpu_index) | |
902 | return UCODE_OK; | |
5b68edc9 AH |
903 | |
904 | if (c->x86 >= 0x15) | |
905 | snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86); | |
a0a29b62 | 906 | |
75da02b2 | 907 | if (request_firmware_direct(&fw, (const char *)fw_name, device)) { |
11f918d3 | 908 | pr_debug("failed to load file %s\n", fw_name); |
ffc7e8ac | 909 | goto out; |
3b2e3d85 | 910 | } |
a0a29b62 | 911 | |
ffc7e8ac BP |
912 | ret = UCODE_ERROR; |
913 | if (*(u32 *)fw->data != UCODE_MAGIC) { | |
258721ef | 914 | pr_err("invalid magic value (0x%08x)\n", *(u32 *)fw->data); |
ffc7e8ac | 915 | goto fw_release; |
506f90ee BP |
916 | } |
917 | ||
2ef84b3b | 918 | ret = load_microcode_amd(cpu, c->x86, fw->data, fw->size); |
a0a29b62 | 919 | |
2efb05e8 | 920 | fw_release: |
ffc7e8ac | 921 | release_firmware(fw); |
3b2e3d85 | 922 | |
2efb05e8 | 923 | out: |
a0a29b62 DA |
924 | return ret; |
925 | } | |
926 | ||
871b72dd DA |
927 | static enum ucode_state |
928 | request_microcode_user(int cpu, const void __user *buf, size_t size) | |
a0a29b62 | 929 | { |
871b72dd | 930 | return UCODE_ERROR; |
80cc9f10 PO |
931 | } |
932 | ||
/* Per-CPU teardown: drop the reference into the (shared) patch cache. */
static void microcode_fini_cpu_amd(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	/* The cache owns the patch data - just clear the pointer. */
	uci->mc = NULL;
}
939 | ||
/* Driver operations handed to the generic microcode core. */
static struct microcode_ops microcode_amd_ops = {
	.request_microcode_user	  = request_microcode_user,
	.request_microcode_fw	  = request_microcode_amd,
	.collect_cpu_info	  = collect_cpu_info_amd,
	.apply_microcode	  = apply_microcode_amd,
	.microcode_fini_cpu	  = microcode_fini_cpu_amd,
};
947 | ||
/*
 * Entry point called by the generic microcode core: sanity-check the
 * vendor/family and hand back this driver's ops.  Returns NULL on
 * unsupported CPUs.
 */
struct microcode_ops * __init init_amd_microcode(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
		pr_warn("AMD CPU family 0x%x not supported\n", c->x86);
		return NULL;
	}

	/* Report once if the early loader already updated the microcode. */
	if (ucode_new_rev)
		pr_info_once("microcode updated early to new patch_level=0x%08x\n",
			     ucode_new_rev);

	return &microcode_amd_ops;
}
f72c1a57 BP |
963 | |
/* Module exit: free the equivalence table and the patch cache. */
void __exit exit_amd_microcode(void)
{
	cleanup();
}