2 * AMD CPU Microcode Update Driver for Linux
4 * This driver allows to upgrade microcode on F10h AMD
7 * Copyright (C) 2008-2011 Advanced Micro Devices Inc.
8 * 2013-2016 Borislav Petkov <bp@alien8.de>
10 * Author: Peter Oruba <peter.oruba@amd.com>
13 * Tigran Aivazian <aivazian.tigran@gmail.com>
16 * Copyright (C) 2013 Advanced Micro Devices, Inc.
18 * Author: Jacob Shin <jacob.shin@amd.com>
19 * Fixes: Borislav Petkov <bp@suse.de>
21 * Licensed under the terms of the GNU General Public
22 * License version 2. See file COPYING for details.
24 #define pr_fmt(fmt) "microcode: " fmt
26 #include <linux/earlycpio.h>
27 #include <linux/firmware.h>
28 #include <linux/uaccess.h>
29 #include <linux/vmalloc.h>
30 #include <linux/initrd.h>
31 #include <linux/kernel.h>
32 #include <linux/pci.h>
34 #include <asm/microcode_amd.h>
35 #include <asm/microcode.h>
36 #include <asm/processor.h>
37 #include <asm/setup.h>
/* CPU equivalence table parsed out of the microcode container. */
41 static struct equiv_cpu_entry *equiv_cpu_table;
44 * This points to the current valid container of microcode patches which we will
45 * save from the initrd/builtin before jettisoning its contents. @mc is the
46 * microcode patch we found to match.
49 struct microcode_amd *mc;
/* Revision of the most recently applied early patch; 0 if none applied yet. */
56 static u32 ucode_new_rev;
/* Copy of the matching patch, kept for early AP load and for resume. */
57 static u8 amd_ucode_patch[PATCH_MAX_SIZE];
60 * Microcode patch container file is prepended to the initrd in cpio
61 * format. See Documentation/x86/microcode.txt
64 ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";
/*
 * Linearly scan @equiv_table (terminated by a zero installed_cpu entry) for
 * the CPUID signature @sig and return the matching equivalence ID.
 * NOTE(review): the not-found return path is outside this excerpt —
 * presumably returns 0; confirm against the full source.
 */
66 static u16 find_equiv_id(struct equiv_cpu_entry *equiv_table, u32 sig)
68 for (; equiv_table && equiv_table->installed_cpu; equiv_table++) {
69 if (sig == equiv_table->installed_cpu)
70 return equiv_table->equiv_cpu;
77 * Check whether there is a valid microcode container file at the beginning
78 * of @buf of size @buf_size. Set @early to use this function in the early path.
80 static bool verify_container(const u8 *buf, size_t buf_size, bool early)
/* Need strictly more than the container header to have any payload. */
84 if (buf_size <= CONTAINER_HDR_SZ) {
86 pr_debug("Truncated microcode container header.\n");
/* The first 32-bit word of a container must be UCODE_MAGIC. */
91 cont_magic = *(const u32 *)buf;
92 if (cont_magic != UCODE_MAGIC) {
94 pr_debug("Invalid magic value (0x%08x).\n", cont_magic);
103 * Check whether there is a valid, non-truncated CPU equivalence table at the
104 * beginning of @buf of size @buf_size. Set @early to use this function in the
107 static bool verify_equivalence_table(const u8 *buf, size_t buf_size, bool early)
109 const u32 *hdr = (const u32 *)buf;
110 u32 cont_type, equiv_tbl_len;
/* The generic container checks (size, magic) must pass first. */
112 if (!verify_container(buf, buf_size, early))
116 if (cont_type != UCODE_EQUIV_CPU_TABLE_TYPE) {
118 pr_debug("Wrong microcode container equivalence table type: %u.\n",
/* Compare the advertised table length against the space past the header. */
124 buf_size -= CONTAINER_HDR_SZ;
126 equiv_tbl_len = hdr[2];
/* Table must hold at least one entry and must fit in the remaining buffer. */
127 if (equiv_tbl_len < sizeof(struct equiv_cpu_entry) ||
128 buf_size < equiv_tbl_len) {
130 pr_debug("Truncated equivalence table.\n");
139 * Check whether there is a valid, non-truncated microcode patch section at the
140 * beginning of @buf of size @buf_size. Set @early to use this function in the
143 * On success, @sh_psize returns the patch size according to the section header,
147 __verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize, bool early)
/* Must have at least a full section header before reading it. */
152 if (buf_size < SECTION_HDR_SIZE) {
154 pr_debug("Truncated patch section.\n")
159 hdr = (const u32 *)buf;
/* Section type must announce a microcode patch. */
163 if (p_type != UCODE_UCODE_TYPE) {
165 pr_debug("Invalid type field (0x%x) in container file section header.\n",
/* A patch smaller than its own header cannot be valid. */
171 if (p_size < sizeof(struct microcode_header_amd)) {
173 pr_debug("Patch of size %u too short.\n", p_size);
184 * Check whether the passed remaining file @buf_size is large enough to contain
185 * a patch of the indicated @sh_psize (and also whether this size does not
186 * exceed the per-family maximum). @sh_psize is the size read from the section
190 __verify_patch_size(u8 family, u32 sh_psize, unsigned int buf_size)
/*
 * NOTE(review): an early-return path exists here whose guarding condition
 * (for newer families without a fixed cap) is not visible in this excerpt.
 */
195 return min_t(u32, sh_psize, buf_size);
/* Per-family maximum patch-body sizes. */
197 #define F1XH_MPB_MAX_SIZE 2048
198 #define F14H_MPB_MAX_SIZE 1824
202 max_size = F1XH_MPB_MAX_SIZE;
205 max_size = F14H_MPB_MAX_SIZE;
/* Unknown family: warn loudly so new families get an entry added here. */
208 WARN(1, "%s: WTF family: 0x%x\n", __func__, family);
/* The patch may exceed neither the remaining buffer nor the family cap. */
213 if (sh_psize > min_t(u32, buf_size, max_size)) {
214 pr_err("patch size mismatch\n");
/*
 * Top-level sanity check of one patch section: validate the section header,
 * then verify the claimed patch size against the remaining buffer and the
 * per-family maximum. Returns the verified patch size on success.
 */
222 verify_patch(u8 family, const u8 *buf, unsigned int buf_size, bool early)
226 if (!__verify_patch_section(buf, buf_size, &sh_psize, early))
229 * The section header length is not included in this indicated size
230 * but is present in the leftover file length so we need to subtract
231 * it before passing this value to the function below.
233 buf_size -= SECTION_HDR_SIZE;
236 * Check if the remaining buffer is big enough to contain a patch of
237 * size sh_psize, as the section claims.
239 if (buf_size < sh_psize) {
241 pr_debug("Patch of size %u truncated.\n", sh_psize);
246 return __verify_patch_size(family, sh_psize, buf_size);
250 * This scans the ucode blob for the proper container as we can have multiple
251 * containers glued together. Returns the equivalence ID from the equivalence
252 * table or 0 if none found.
253 * Returns the amount of bytes consumed while scanning. @desc contains all the
254 * data we're going to use in later stages of the application.
256 static ssize_t parse_container(u8 *ucode, ssize_t size, struct cont_desc *desc)
258 struct equiv_cpu_entry *eq;
259 ssize_t orig_size = size;
260 u32 *hdr = (u32 *)ucode;
264 /* Am I looking at an equivalence table header? */
265 if (hdr[0] != UCODE_MAGIC ||
266 hdr[1] != UCODE_EQUIV_CPU_TABLE_TYPE ||
/* Not a container start — let the caller skip past the header. */
268 return CONTAINER_HDR_SZ;
/* The equivalence table immediately follows the container header. */
272 eq = (struct equiv_cpu_entry *)(buf + CONTAINER_HDR_SZ);
274 /* Find the equivalence ID of our CPU in this table: */
275 eq_id = find_equiv_id(eq, desc->cpuid_1_eax);
/* hdr[2] is the equivalence table length; advance past table + header. */
277 buf += hdr[2] + CONTAINER_HDR_SZ;
278 size -= hdr[2] + CONTAINER_HDR_SZ;
281 * Scan through the rest of the container to find where it ends. We do
282 * some basic sanity-checking too.
285 struct microcode_amd *mc;
/* Only patch sections are expected inside the container body. */
290 if (hdr[0] != UCODE_UCODE_TYPE)
293 /* Sanity-check patch size. */
295 if (patch_size > PATCH_MAX_SIZE)
298 /* Skip patch section header: */
299 buf += SECTION_HDR_SIZE;
300 size -= SECTION_HDR_SIZE;
302 mc = (struct microcode_amd *)buf;
/* Remember the patch matching this CPU's equivalence ID. */
303 if (eq_id == mc->hdr.processor_rev_id) {
304 desc->psize = patch_size;
313 * If we have found a patch (desc->mc), it means we're looking at the
314 * container which has a patch for this CPU so return 0 to mean, @ucode
315 * already points to the proper container. Otherwise, we return the size
316 * we scanned so that we can advance to the next container in the
/* Record how big this container turned out to be. */
321 desc->size = orig_size - size;
326 return orig_size - size;
330 * Scan the ucode blob for the proper container as we can have multiple
331 * containers glued together.
333 static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc)
/* Walk the blob container by container; parse_container() reports consumption. */
338 ssize_t s = parse_container(ucode, rem, desc);
/*
 * Hand the patch image to the CPU via the AMD patch-loader MSR, then confirm
 * the update took by re-reading the current patch-level MSR.
 */
347 static int __apply_microcode_amd(struct microcode_amd *mc)
351 native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc->hdr.data_code);
353 /* verify patch application was successful */
354 native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
/* Revision must now match the patch we just loaded. */
355 if (rev != mc->hdr.patch_id)
362 * Early load occurs before we can vmalloc(). So we look for the microcode
363 * patch container file in initrd, traverse equivalent cpu table, look for a
364 * matching microcode patch, and update, all in initrd memory in place.
365 * When vmalloc() is available for use later -- on 64-bit during first AP load,
366 * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
367 * load_microcode_amd() to save equivalent cpu table and microcode patches in
368 * kernel heap memory.
370 * Returns true if container found (sets @desc), false otherwise.
373 apply_microcode_early_amd(u32 cpuid_1_eax, void *ucode, size_t size, bool save_patch)
375 struct cont_desc desc = { 0 };
376 u8 (*patch)[PATCH_MAX_SIZE];
377 struct microcode_amd *mc;
378 u32 rev, dummy, *new_rev;
/*
 * 32-bit early boot runs before paging is up, so globals must be
 * accessed through their physical addresses.
 */
382 new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
383 patch = (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
385 new_rev = &ucode_new_rev;
386 patch = &amd_ucode_patch;
389 desc.cpuid_1_eax = cpuid_1_eax;
/* Locate the container and patch matching this CPU. */
391 scan_containers(ucode, size, &desc);
/* Nothing to do if the CPU already runs this revision or newer. */
397 native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
398 if (rev >= mc->hdr.patch_id)
401 if (!__apply_microcode_amd(mc)) {
402 *new_rev = mc->hdr.patch_id;
/* Optionally stash the patch for later AP load / resume. */
406 memcpy(patch, mc, min_t(u32, desc.psize, PATCH_MAX_SIZE));
/*
 * Look up AMD microcode built into the kernel image itself.
 * NOTE(review): the condition selecting the family-specific name over the
 * legacy "microcode_amd.bin" default is not visible in this excerpt —
 * presumably family >= 0x15; confirm against the full source.
 */
412 static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family)
415 char fw_name[36] = "amd-ucode/microcode_amd.bin";
418 snprintf(fw_name, sizeof(fw_name),
419 "amd-ucode/microcode_amd_fam%.2xh.bin", family);
421 return get_builtin_firmware(cp, fw_name);
/*
 * Common early-load helper: find a microcode container in the builtins or
 * the initrd and return it in @ret. Handles the 32-bit pre-paging case by
 * going through physical addresses.
 */
427 static void __load_ucode_amd(unsigned int cpuid_1_eax, struct cpio_data *ret)
429 struct ucode_cpu_info *uci;
434 if (IS_ENABLED(CONFIG_X86_32)) {
/* Paging not yet enabled on 32-bit: use physical addresses. */
435 uci = (struct ucode_cpu_info *)__pa_nodebug(ucode_cpu_info);
436 path = (const char *)__pa_nodebug(ucode_path);
439 uci = ucode_cpu_info;
/* Builtin firmware takes precedence; fall back to the initrd blob. */
444 if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
445 cp = find_microcode_in_initrd(path, use_pa);
447 /* Needed in load_microcode_amd() */
448 uci->cpu_sig.sig = cpuid_1_eax;
/*
 * Early microcode load on the boot CPU: find the container and apply the
 * matching patch, saving it (save_patch=true) for the APs to reuse.
 */
453 void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax)
455 struct cpio_data cp = { };
457 __load_ucode_amd(cpuid_1_eax, &cp);
/* Bail if no container data was found. */
458 if (!(cp.data && cp.size))
461 apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, true);
/*
 * Early microcode load on an application processor: prefer the patch the
 * BSP already saved in amd_ucode_patch; otherwise re-scan the initrd.
 */
464 void load_ucode_amd_ap(unsigned int cpuid_1_eax)
466 struct microcode_amd *mc;
468 u32 *new_rev, rev, dummy;
470 if (IS_ENABLED(CONFIG_X86_32)) {
/* Pre-paging 32-bit APs must go through physical addresses. */
471 mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
472 new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
474 mc = (struct microcode_amd *)amd_ucode_patch;
475 new_rev = &ucode_new_rev;
478 native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
480 /* Check whether we have saved a new patch already: */
481 if (*new_rev && rev < mc->hdr.patch_id) {
482 if (!__apply_microcode_amd(mc)) {
483 *new_rev = mc->hdr.patch_id;
/* No saved patch applied — fall back to scanning the container. */
488 __load_ucode_amd(cpuid_1_eax, &cp);
489 if (!(cp.data && cp.size))
/* save_patch=false: the BSP already cached the patch. */
492 apply_microcode_early_amd(cpuid_1_eax, cp.data, cp.size, false);
/* Forward declaration; defined further down in this file. */
495 static enum ucode_state
496 load_microcode_amd(bool save, u8 family, const u8 *data, size_t size);
/*
 * Copy the matching microcode from the initrd into kernel heap memory
 * before the initrd is jettisoned (vmalloc() is usable at this point).
 */
498 int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
500 struct cont_desc desc = { 0 };
501 enum ucode_state ret;
504 cp = find_microcode_in_initrd(ucode_path, false);
505 if (!(cp.data && cp.size))
508 desc.cpuid_1_eax = cpuid_1_eax;
/* Locate the container holding this CPU's patch. */
510 scan_containers(cp.data, cp.size, &desc);
/* save=true: stash the BSP's patch for early load paths too. */
514 ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size);
515 if (ret > UCODE_UPDATED)
/*
 * Re-apply the saved patch, e.g. after resume from suspend, if the CPU's
 * current patch level is older than what we have cached.
 */
521 void reload_ucode_amd(void)
523 struct microcode_amd *mc;
526 mc = (struct microcode_amd *)amd_ucode_patch;
528 rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
530 if (rev < mc->hdr.patch_id) {
531 if (!__apply_microcode_amd(mc)) {
532 ucode_new_rev = mc->hdr.patch_id;
533 pr_info("reload patch_level=0x%08x\n", ucode_new_rev);
/* Convenience wrapper: equivalence ID for @cpu's cached CPUID signature. */
537 static u16 __find_equiv_id(unsigned int cpu)
539 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
540 return find_equiv_id(equiv_cpu_table, uci->cpu_sig.sig);
/*
 * Reverse lookup: map an equivalence ID back to the installed-CPU CPUID
 * signature it was derived from. Requires the table to be present.
 */
543 static u32 find_cpu_family_by_equiv_cpu(u16 equiv_cpu)
547 BUG_ON(!equiv_cpu_table);
549 while (equiv_cpu_table[i].equiv_cpu != 0) {
550 if (equiv_cpu == equiv_cpu_table[i].equiv_cpu)
551 return equiv_cpu_table[i].installed_cpu;
558 * a small, trivial cache of per-family ucode patches
/*
 * Return the cached patch for @equiv_cpu, or fall through if none.
 * NOTE(review): "µcode_cache" looks like mojibake for "&microcode_cache"
 * (an "&micro" HTML-entity corruption) — fix in source control.
 */
560 static struct ucode_patch *cache_find_patch(u16 equiv_cpu)
562 struct ucode_patch *p;
564 list_for_each_entry(p, µcode_cache, plist)
565 if (p->equiv_cpu == equiv_cpu)
/*
 * Insert @new_patch into the cache, replacing an older entry for the same
 * equivalence ID, or dropping @new_patch if the cached one is newer.
 * NOTE(review): "µcode_cache" looks like mojibake for "&microcode_cache" —
 * fix in source control.
 */
570 static void update_cache(struct ucode_patch *new_patch)
572 struct ucode_patch *p;
574 list_for_each_entry(p, µcode_cache, plist) {
575 if (p->equiv_cpu == new_patch->equiv_cpu) {
576 if (p->patch_id >= new_patch->patch_id) {
577 /* we already have the latest patch */
578 kfree(new_patch->data);
/* Newer patch: swap it into the list in place of the old node. */
583 list_replace(&p->plist, &new_patch->plist);
589 /* no patch found, add it */
590 list_add_tail(&new_patch->plist, µcode_cache);
/* Tear down the whole patch cache, freeing every entry. */
593 static void free_cache(void)
595 struct ucode_patch *p, *tmp;
597 list_for_each_entry_safe(p, tmp, µcode_cache, plist) {
598 __list_del(p->plist.prev, p->plist.next);
/* Find the cached patch applicable to @cpu via its equivalence ID. */
604 static struct ucode_patch *find_patch(unsigned int cpu)
608 equiv_id = __find_equiv_id(cpu);
612 return cache_find_patch(equiv_id);
/*
 * Fill in @csig with @cpu's CPUID signature and current microcode revision,
 * and hook up uci->mc when an early-loaded patch matches that revision.
 */
615 static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
617 struct cpuinfo_x86 *c = &cpu_data(cpu);
618 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
619 struct ucode_patch *p;
621 csig->sig = cpuid_eax(0x00000001);
622 csig->rev = c->microcode;
625 * a patch could have been loaded early, set uci->mc so that
626 * mc_bp_resume() can call apply_microcode()
629 if (p && (p->patch_id == csig->rev))
632 pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);
/*
 * Late-load entry point: apply the cached patch to @cpu if its current
 * revision is older, then propagate the new revision into the per-CPU and
 * (for the BSP) boot_cpu_data bookkeeping. Must run on @cpu itself.
 */
637 static enum ucode_state apply_microcode_amd(int cpu)
639 struct cpuinfo_x86 *c = &cpu_data(cpu);
640 struct microcode_amd *mc_amd;
641 struct ucode_cpu_info *uci;
642 struct ucode_patch *p;
643 enum ucode_state ret;
/* This function must execute on the target CPU. */
646 BUG_ON(raw_smp_processor_id() != cpu);
648 uci = ucode_cpu_info + cpu;
657 rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
659 /* need to apply patch? */
660 if (rev >= mc_amd->hdr.patch_id) {
665 if (__apply_microcode_amd(mc_amd)) {
666 pr_err("CPU%d: update failed for patch_level=0x%08x\n",
667 cpu, mc_amd->hdr.patch_id);
671 rev = mc_amd->hdr.patch_id;
674 pr_info("CPU%d: new patch_level=0x%08x\n", cpu, rev);
/* Record the now-running revision for this CPU. */
677 uci->cpu_sig.rev = rev;
680 /* Update boot_cpu_data's revision too, if we're on the BSP: */
681 if (c->cpu_index == boot_cpu_data.cpu_index)
682 boot_cpu_data.microcode = rev;
/*
 * Copy the container's equivalence table into a vmalloc'd kernel buffer
 * (equiv_cpu_table). Returns the number of bytes consumed from @buf,
 * i.e. table size plus container header.
 */
687 static int install_equiv_cpu_table(const u8 *buf)
689 unsigned int *ibuf = (unsigned int *)buf;
690 unsigned int type = ibuf[1];
691 unsigned int size = ibuf[2];
693 if (type != UCODE_EQUIV_CPU_TABLE_TYPE || !size) {
694 pr_err("empty section/"
695 "invalid type field in container file section header\n");
699 equiv_cpu_table = vmalloc(size);
700 if (!equiv_cpu_table) {
701 pr_err("failed to allocate equivalent CPU table\n");
/* The table body starts right after the container header. */
705 memcpy(equiv_cpu_table, buf + CONTAINER_HDR_SZ, size);
707 /* add header length */
708 return size + CONTAINER_HDR_SZ;
/* Release the equivalence table; vfree(NULL) is a no-op, so always safe. */
711 static void free_equiv_cpu_table(void)
713 vfree(equiv_cpu_table);
714 equiv_cpu_table = NULL;
/* Full teardown used on load failure: equivalence table (and patch cache). */
717 static void cleanup(void)
719 free_equiv_cpu_table();
724 * Return a non-negative value even if some of the checks failed so that
725 * we can skip over the next patch. If we return a negative value, we
726 * signal a grave error like a memory allocation has failed and the
727 * driver cannot continue functioning normally. In such cases, we tear
728 * down everything we've used up so far and exit.
730 static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover)
732 struct microcode_header_amd *mc_hdr;
733 unsigned int patch_size, crnt_size;
734 struct ucode_patch *patch;
/* Structural validation of the section before trusting its contents. */
738 patch_size = verify_patch(family, fw, leftover, false);
740 pr_debug("Patch size mismatch.\n");
744 /* If initial rough pokes pass, we can start looking at the header. */
745 crnt_size = patch_size + SECTION_HDR_SIZE;
746 mc_hdr = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE);
747 proc_id = mc_hdr->processor_rev_id;
/* Map the patch's equivalence ID back to a CPUID signature. */
749 proc_fam = find_cpu_family_by_equiv_cpu(proc_id);
751 pr_err("No patch family for equiv ID: 0x%04x\n", proc_id);
755 /* check if patch is for the current family */
756 proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff);
757 if (proc_fam != family)
/* Chipset-specific (northbridge/southbridge) patches are not handled. */
760 if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
761 pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n",
766 patch = kzalloc(sizeof(*patch), GFP_KERNEL);
768 pr_err("Patch allocation failure.\n");
/* Own a heap copy of the patch body (section header excluded). */
772 patch->data = kmemdup(fw + SECTION_HDR_SIZE, patch_size, GFP_KERNEL);
774 pr_err("Patch data allocation failure.\n");
779 INIT_LIST_HEAD(&patch->plist);
780 patch->patch_id = mc_hdr->patch_id;
781 patch->equiv_cpu = proc_id;
783 pr_debug("%s: Added patch_id: 0x%08x, proc_id: 0x%04x\n",
784 __func__, patch->patch_id, proc_id);
786 /* ... and add to cache. */
/*
 * Parse a whole container image: install the equivalence table, then walk
 * every patch section, verifying and caching each in turn.
 */
792 static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
795 enum ucode_state ret = UCODE_ERROR;
796 unsigned int leftover;
/* The equivalence table must come first; its size tells us the offset. */
801 offset = install_equiv_cpu_table(data);
803 pr_err("failed to create equivalent cpu table\n");
807 leftover = size - offset;
/* Everything after the table must be patch sections. */
809 if (*(u32 *)fw != UCODE_UCODE_TYPE) {
810 pr_err("invalid type field in container file section header\n");
811 free_equiv_cpu_table();
/* Consume one verified patch per iteration. */
816 crnt_size = verify_and_add_patch(family, fw, leftover);
821 leftover -= crnt_size;
/*
 * Replace any previously loaded container with @data and, when @save is set
 * (BSP path), stash the matching patch into amd_ucode_patch for early load.
 */
827 static enum ucode_state
828 load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
830 struct ucode_patch *p;
831 enum ucode_state ret;
833 /* free old equiv table */
834 free_equiv_cpu_table();
836 ret = __load_microcode_amd(family, data, size);
837 if (ret != UCODE_OK) {
/* Already running this revision — nothing further to save. */
846 if (boot_cpu_data.microcode == p->patch_id)
852 /* save BSP's matching patch for early load */
/* Zero first so stale bytes past the patch end cannot leak through. */
856 memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
857 memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), PATCH_MAX_SIZE));
863 * AMD microcode firmware naming convention, up to family 15h they are in
866 * amd-ucode/microcode_amd.bin
868 * This legacy file is always smaller than 2K in size.
870 * Beginning with family 15h, they are in family-specific firmware files:
872 * amd-ucode/microcode_amd_fam15h.bin
873 * amd-ucode/microcode_amd_fam16h.bin
876 * These might be larger than 2K.
/*
 * request_firmware()-based late load: fetch the (family-specific) firmware
 * file from userspace and feed it to load_microcode_amd(). Only the boot
 * CPU actually reloads the container.
 */
878 static enum ucode_state request_microcode_amd(int cpu, struct device *device,
881 char fw_name[36] = "amd-ucode/microcode_amd.bin";
882 struct cpuinfo_x86 *c = &cpu_data(cpu);
883 bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
884 enum ucode_state ret = UCODE_NFOUND;
885 const struct firmware *fw;
887 /* reload ucode container only on the boot cpu */
888 if (!refresh_fw || !bsp)
892 snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);
894 if (request_firmware_direct(&fw, (const char *)fw_name, device)) {
895 pr_debug("failed to load file %s\n", fw_name);
/* Cheap sanity check before handing the blob to the parser. */
900 if (*(u32 *)fw->data != UCODE_MAGIC) {
901 pr_err("invalid magic value (0x%08x)\n", *(u32 *)fw->data);
905 ret = load_microcode_amd(bsp, c->x86, fw->data, fw->size);
908 release_firmware(fw);
/*
 * Userspace-buffer load interface; body not visible in this excerpt —
 * NOTE(review): presumably unsupported/stubbed on AMD; confirm.
 */
914 static enum ucode_state
915 request_microcode_user(int cpu, const void __user *buf, size_t size)
/* Per-CPU teardown: drop this CPU's reference to its microcode patch. */
920 static void microcode_fini_cpu_amd(int cpu)
922 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
/* Vendor ops vtable handed to the generic microcode core. */
927 static struct microcode_ops microcode_amd_ops = {
928 .request_microcode_user = request_microcode_user,
929 .request_microcode_fw = request_microcode_amd,
930 .collect_cpu_info = collect_cpu_info_amd,
931 .apply_microcode = apply_microcode_amd,
932 .microcode_fini_cpu = microcode_fini_cpu_amd,
/*
 * Driver entry: reject pre-F10h AMD (and non-AMD) CPUs, report any early
 * update once, and return the AMD ops table to the microcode core.
 * NOTE(review): "µcode_amd_ops" looks like mojibake for
 * "&microcode_amd_ops" (an "&micro" HTML-entity corruption) — fix in
 * source control.
 */
935 struct microcode_ops * __init init_amd_microcode(void)
937 struct cpuinfo_x86 *c = &boot_cpu_data;
939 if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
940 pr_warn("AMD CPU family 0x%x not supported\n", c->x86);
945 pr_info_once("microcode updated early to new patch_level=0x%08x\n",
948 return µcode_amd_ops;
951 void __exit exit_amd_microcode(void)