/* Extracted from a blame view; all lines originate from commit 757885e9. */
1 | /* |
2 | * Copyright (C) 2013 Advanced Micro Devices, Inc. | |
3 | * | |
4 | * Author: Jacob Shin <jacob.shin@amd.com> | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify | |
7 | * it under the terms of the GNU General Public License version 2 as | |
8 | * published by the Free Software Foundation. | |
9 | */ | |
10 | ||
11 | #include <linux/earlycpio.h> | |
12 | ||
13 | #include <asm/cpu.h> | |
14 | #include <asm/setup.h> | |
15 | #include <asm/microcode_amd.h> | |
16 | ||
/* Set once the container has been saved to kernel heap via load_microcode_amd(). */
static bool ucode_loaded;
/* Patch level applied during early load; 0 if no early update took place. */
static u32 ucode_new_rev;
19 | ||
/*
 * Microcode patch container file is prepended to the initrd in cpio format.
 * See Documentation/x86/early-microcode.txt
 *
 * __initdata: only referenced from __init code, freed after boot.
 */
static __initdata char ucode_path[] = "kernel/x86/microcode/AuthenticAMD.bin";
25 | ||
26 | static struct cpio_data __init find_ucode_in_initrd(void) | |
27 | { | |
28 | long offset = 0; | |
29 | struct cpio_data cd; | |
30 | ||
31 | #ifdef CONFIG_X86_32 | |
32 | /* | |
33 | * On 32-bit, early load occurs before paging is turned on so we need | |
34 | * to use physical addresses. | |
35 | */ | |
36 | if (!(read_cr0() & X86_CR0_PG)) { | |
37 | struct boot_params *p; | |
38 | p = (struct boot_params *)__pa_nodebug(&boot_params); | |
39 | cd = find_cpio_data((char *)__pa_nodebug(ucode_path), | |
40 | (void *)p->hdr.ramdisk_image, p->hdr.ramdisk_size, | |
41 | &offset); | |
42 | } else | |
43 | #endif | |
44 | cd = find_cpio_data(ucode_path, | |
45 | (void *)(boot_params.hdr.ramdisk_image + PAGE_OFFSET), | |
46 | boot_params.hdr.ramdisk_size, &offset); | |
47 | ||
48 | if (*(u32 *)cd.data != UCODE_MAGIC) { | |
49 | cd.data = NULL; | |
50 | cd.size = 0; | |
51 | } | |
52 | ||
53 | return cd; | |
54 | } | |
55 | ||
/*
 * Early load occurs before we can vmalloc(). So we look for the microcode
 * patch container file in initrd, traverse equivalent cpu table, look for a
 * matching microcode patch, and update, all in initrd memory in place.
 * When vmalloc() is available for use later -- on 64-bit during first AP load,
 * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
 * load_microcode_amd() to save equivalent cpu table and microcode patches in
 * kernel heap memory.
 */
static void __init apply_ucode_in_initrd(void)
{
	struct cpio_data cd;
	struct equiv_cpu_entry *eq;
	u32 *header;
	u8 *data;
	u16 eq_id;
	int offset, left;
	u32 rev, dummy;
	u32 *new_rev;

#ifdef CONFIG_X86_32
	/*
	 * This can run before paging is enabled on 32-bit, so record the
	 * applied revision through the physical address of ucode_new_rev.
	 */
	new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
#else
	new_rev = &ucode_new_rev;
#endif
	cd = find_ucode_in_initrd();
	if (!cd.data)
		return;

	data = cd.data;
	left = cd.size;
	header = (u32 *)data;

	/* find equiv cpu table */

	if (header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
	    header[2] == 0)			       /* size */
		return;

	eq = (struct equiv_cpu_entry *)(data + CONTAINER_HDR_SZ);

	/* Advance past the container header and the equivalence table. */
	offset = header[2] + CONTAINER_HDR_SZ;
	data += offset;
	left -= offset;

	/* Map this CPU's CPUID(1) signature to an equivalence id. */
	eq_id = find_equiv_id(eq, cpuid_eax(0x00000001));
	if (!eq_id)
		return;

	/* find ucode and update if needed */

	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

	/*
	 * NOTE(review): the loop reads header[0]/header[1] whenever
	 * left > 0; a truncated container with 0 < left < SECTION_HDR_SIZE
	 * would be read past its end -- confirm only well-formed containers
	 * can reach this point.
	 */
	while (left > 0) {
		struct microcode_amd *mc;

		header = (u32 *)data;
		if (header[0] != UCODE_UCODE_TYPE || /* type */
		    header[1] == 0)		     /* size */
			break;

		mc = (struct microcode_amd *)(data + SECTION_HDR_SIZE);

		/* Apply only a matching patch that is newer than what runs. */
		if (eq_id == mc->hdr.processor_rev_id && rev < mc->hdr.patch_id)
			if (__apply_microcode_amd(mc) == 0) {
				/* Report only the first successful update. */
				if (!(*new_rev))
					*new_rev = mc->hdr.patch_id;
				break;
			}

		/* Skip to the next UCODE section. */
		offset = header[1] + SECTION_HDR_SIZE;
		data += offset;
		left -= offset;
	}
}
129 | ||
/* BSP early-load entry point: apply a patch directly from the initrd. */
void __init load_ucode_amd_bsp(void)
{
	apply_ucode_in_initrd();
}
134 | ||
135 | #ifdef CONFIG_X86_32 | |
136 | u8 __cpuinitdata amd_bsp_mpb[MPB_MAX_SIZE]; | |
137 | ||
/*
 * On 32-bit, since AP's early load occurs before paging is turned on, we
 * cannot traverse cpu_equiv_table and pcache in kernel heap memory. So during
 * cold boot, AP will apply_ucode_in_initrd() just like the BSP. During
 * save_microcode_in_initrd_amd() BSP's patch is copied to amd_bsp_mpb, which
 * is used upon resume from suspend.
 */
void __cpuinit load_ucode_amd_ap(void)
{
	struct microcode_amd *mc;

	/* Paging may still be off: reach the saved patch via its phys addr. */
	mc = (struct microcode_amd *)__pa_nodebug(amd_bsp_mpb);
	if (mc->hdr.patch_id && mc->hdr.processor_rev_id)
		/* A saved patch exists (resume path): re-apply it directly. */
		__apply_microcode_amd(mc);
	else
		/* Cold boot: nothing saved yet, scan the initrd like the BSP. */
		apply_ucode_in_initrd();
}
155 | ||
156 | static void __init collect_cpu_sig_on_bsp(void *arg) | |
157 | { | |
158 | unsigned int cpu = smp_processor_id(); | |
159 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | |
160 | uci->cpu_sig.sig = cpuid_eax(0x00000001); | |
161 | } | |
162 | #else | |
163 | static void __cpuinit collect_cpu_info_amd_early(struct cpuinfo_x86 *c, | |
164 | struct ucode_cpu_info *uci) | |
165 | { | |
166 | u32 rev, eax; | |
167 | ||
168 | rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax); | |
169 | eax = cpuid_eax(0x00000001); | |
170 | ||
171 | uci->cpu_sig.sig = eax; | |
172 | uci->cpu_sig.rev = rev; | |
173 | c->microcode = rev; | |
174 | c->x86 = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); | |
175 | } | |
176 | ||
/*
 * AP early-load entry point (64-bit). The first AP here saves the initrd
 * container to kernel heap via load_microcode_amd() (vmalloc() works by
 * now); each AP then applies its matching patch with apply_microcode_amd().
 */
void __cpuinit load_ucode_amd_ap(void)
{
	unsigned int cpu = smp_processor_id();

	collect_cpu_info_amd_early(&cpu_data(cpu), ucode_cpu_info + cpu);

	/*
	 * cpu != 0: the BSP already applied its patch straight from the
	 * initrd in load_ucode_amd_bsp(); save the container only once.
	 */
	if (cpu && !ucode_loaded) {
		struct cpio_data cd = find_ucode_in_initrd();

		if (load_microcode_amd(0, cd.data, cd.size) != UCODE_OK)
			return;
		ucode_loaded = true;
	}

	apply_microcode_amd(cpu);
}
192 | #endif | |
193 | ||
/*
 * Report any early microcode update and, if not already done, save the
 * container from the initrd to kernel heap memory via load_microcode_amd().
 *
 * Returns 0 on success (or if already saved), -EINVAL when no valid
 * container is found or saving it fails.
 */
int __init save_microcode_in_initrd_amd(void)
{
	enum ucode_state ret;
	struct cpio_data cd;
#ifdef CONFIG_X86_32
	unsigned int bsp = boot_cpu_data.cpu_index;
	struct ucode_cpu_info *uci = ucode_cpu_info + bsp;

	/* Collect the BSP's signature if the early path didn't record it. */
	if (!uci->cpu_sig.sig)
		smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1);
#endif
	/* ucode_new_rev is nonzero only when an early update was applied. */
	if (ucode_new_rev)
		pr_info("microcode: updated early to new patch_level=0x%08x\n",
			ucode_new_rev);

	if (ucode_loaded)
		return 0;

	cd = find_ucode_in_initrd();
	if (!cd.data)
		return -EINVAL;

	ret = load_microcode_amd(0, cd.data, cd.size);
	if (ret != UCODE_OK)
		return -EINVAL;

	ucode_loaded = true;
	return 0;
}