Commit | Line | Data |
---|---|---|
4b3db708 CG |
1 | /* |
2 | * Extended Error Log driver | |
3 | * | |
4 | * Copyright (C) 2013 Intel Corp. | |
5 | * Author: Chen, Gong <gong.chen@intel.com> | |
6 | * | |
7 | * This file is licensed under GPLv2. | |
8 | */ | |
9 | ||
10 | #include <linux/module.h> | |
11 | #include <linux/acpi.h> | |
12 | #include <acpi/acpi_bus.h> | |
13 | #include <linux/cper.h> | |
14 | #include <linux/ratelimit.h> | |
42139eb3 | 15 | #include <linux/edac.h> |
4b3db708 CG |
16 | #include <asm/cpu.h> |
17 | #include <asm/mce.h> | |
18 | ||
19 | #include "apei/apei-internal.h" | |
20 | ||
#define EXT_ELOG_ENTRY_MASK	GENMASK_ULL(51, 0) /* elog entry address mask */

/* _DSM revision and function indices used by this driver */
#define EXTLOG_DSM_REV		0x0
#define EXTLOG_FN_QUERY		0x0
#define EXTLOG_FN_ADDR		0x1

/* L1 header flag / _DSM query result bits */
#define FLAG_OS_OPTIN		BIT(0)
#define EXTLOG_QUERY_L1_EXIST	BIT(1)
/* bit 63 of an L1 entry: the slot points at a valid elog record */
#define ELOG_ENTRY_VALID	(1ULL<<63)
/* size of one elog record (and of the elog_buf bounce buffer) */
#define ELOG_ENTRY_LEN		0x1000

#define EMCA_BUG \
	"Can not request iomem region <0x%016llx-0x%016llx> - eMCA disabled\n"
34 | ||
/*
 * eMCA L1 Directory header as laid out by firmware at l1_dirbase.
 * Field layout is part of the firmware interface — do not reorder.
 */
struct extlog_l1_head {
	u32 ver;	/* Header Version */
	u32 hdr_len;	/* Header Length */
	u64 total_len;	/* entire L1 Directory length including this header */
	u64 elog_base;	/* MCA Error Log Directory base address */
	u64 elog_len;	/* MCA Error Log Directory length */
	u32 flags;	/* bit 0 - OS/VMM Opt-in */
	u8 rev0[12];	/* reserved */
	u32 entries;	/* Valid L1 Directory entries per logical processor */
	u8 rev1[12];	/* reserved */
};
46 | ||
/* EDAC reporting mode saved at load time; restored in extlog_exit() */
static int old_edac_report_status;

/* UUID of the extlog _DSM evaluated under \_SB */
static u8 extlog_dsm_uuid[] = "663E35AF-CC10-41A4-88EA-5470AF055295";

/* L1 table related physical address */
static u64 elog_base;
static size_t elog_size;
static u64 l1_dirbase;
static size_t l1_size;

/* L1 table related virtual address */
static void __iomem *extlog_l1_addr;
static void __iomem *elog_addr;

/* bounce buffer: one ELOG_ENTRY_LEN record copied out of mapped elog space */
static void *elog_buf;

/* start of the per-cpu entry array, just past the mapped L1 header */
static u64 *l1_entry_base;
/* valid L1 entries per logical processor (from the L1 header) */
static u32 l1_percpu_entry;

/* index of the L1 slot for a given (cpu, bank) pair */
#define ELOG_IDX(cpu, bank) \
	(cpu_physical_id(cpu) * l1_percpu_entry + (bank))

/* 64-bit descriptor word held in L1 slot @idx */
#define ELOG_ENTRY_DATA(idx) \
	(*(l1_entry_base + (idx)))

/* translate an elog physical address into the mapped virtual window */
#define ELOG_ENTRY_ADDR(phyaddr) \
	(phyaddr - elog_base + (u8 *)elog_addr)
74 | ||
75 | static struct acpi_generic_status *extlog_elog_entry_check(int cpu, int bank) | |
76 | { | |
77 | int idx; | |
78 | u64 data; | |
79 | struct acpi_generic_status *estatus; | |
80 | ||
81 | WARN_ON(cpu < 0); | |
82 | idx = ELOG_IDX(cpu, bank); | |
83 | data = ELOG_ENTRY_DATA(idx); | |
84 | if ((data & ELOG_ENTRY_VALID) == 0) | |
85 | return NULL; | |
86 | ||
87 | data &= EXT_ELOG_ENTRY_MASK; | |
88 | estatus = (struct acpi_generic_status *)ELOG_ENTRY_ADDR(data); | |
89 | ||
90 | /* if no valid data in elog entry, just return */ | |
91 | if (estatus->block_status == 0) | |
92 | return NULL; | |
93 | ||
94 | return estatus; | |
95 | } | |
96 | ||
97 | static void __print_extlog_rcd(const char *pfx, | |
98 | struct acpi_generic_status *estatus, int cpu) | |
99 | { | |
100 | static atomic_t seqno; | |
101 | unsigned int curr_seqno; | |
102 | char pfx_seq[64]; | |
103 | ||
104 | if (!pfx) { | |
105 | if (estatus->error_severity <= CPER_SEV_CORRECTED) | |
106 | pfx = KERN_INFO; | |
107 | else | |
108 | pfx = KERN_ERR; | |
109 | } | |
110 | curr_seqno = atomic_inc_return(&seqno); | |
111 | snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}", pfx, curr_seqno); | |
112 | printk("%s""Hardware error detected on CPU%d\n", pfx_seq, cpu); | |
113 | cper_estatus_print(pfx_seq, estatus); | |
114 | } | |
115 | ||
116 | static int print_extlog_rcd(const char *pfx, | |
117 | struct acpi_generic_status *estatus, int cpu) | |
118 | { | |
119 | /* Not more than 2 messages every 5 seconds */ | |
120 | static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2); | |
121 | static DEFINE_RATELIMIT_STATE(ratelimit_uncorrected, 5*HZ, 2); | |
122 | struct ratelimit_state *ratelimit; | |
123 | ||
124 | if (estatus->error_severity == CPER_SEV_CORRECTED || | |
125 | (estatus->error_severity == CPER_SEV_INFORMATIONAL)) | |
126 | ratelimit = &ratelimit_corrected; | |
127 | else | |
128 | ratelimit = &ratelimit_uncorrected; | |
129 | if (__ratelimit(ratelimit)) { | |
130 | __print_extlog_rcd(pfx, estatus, cpu); | |
131 | return 0; | |
132 | } | |
133 | ||
134 | return 1; | |
135 | } | |
136 | ||
137 | static int extlog_print(struct notifier_block *nb, unsigned long val, | |
138 | void *data) | |
139 | { | |
140 | struct mce *mce = (struct mce *)data; | |
141 | int bank = mce->bank; | |
142 | int cpu = mce->extcpu; | |
143 | struct acpi_generic_status *estatus; | |
144 | int rc; | |
145 | ||
146 | estatus = extlog_elog_entry_check(cpu, bank); | |
147 | if (estatus == NULL) | |
148 | return NOTIFY_DONE; | |
149 | ||
150 | memcpy(elog_buf, (void *)estatus, ELOG_ENTRY_LEN); | |
151 | /* clear record status to enable BIOS to update it again */ | |
152 | estatus->block_status = 0; | |
153 | ||
154 | rc = print_extlog_rcd(NULL, (struct acpi_generic_status *)elog_buf, cpu); | |
155 | ||
42139eb3 | 156 | return NOTIFY_STOP; |
4b3db708 CG |
157 | } |
158 | ||
159 | static int extlog_get_dsm(acpi_handle handle, int rev, int func, u64 *ret) | |
160 | { | |
161 | struct acpi_buffer buf = {ACPI_ALLOCATE_BUFFER, NULL}; | |
162 | struct acpi_object_list input; | |
163 | union acpi_object params[4], *obj; | |
164 | u8 uuid[16]; | |
165 | int i; | |
166 | ||
167 | acpi_str_to_uuid(extlog_dsm_uuid, uuid); | |
168 | input.count = 4; | |
169 | input.pointer = params; | |
170 | params[0].type = ACPI_TYPE_BUFFER; | |
171 | params[0].buffer.length = 16; | |
172 | params[0].buffer.pointer = uuid; | |
173 | params[1].type = ACPI_TYPE_INTEGER; | |
174 | params[1].integer.value = rev; | |
175 | params[2].type = ACPI_TYPE_INTEGER; | |
176 | params[2].integer.value = func; | |
177 | params[3].type = ACPI_TYPE_PACKAGE; | |
178 | params[3].package.count = 0; | |
179 | params[3].package.elements = NULL; | |
180 | ||
181 | if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DSM", &input, &buf))) | |
182 | return -1; | |
183 | ||
184 | *ret = 0; | |
185 | obj = (union acpi_object *)buf.pointer; | |
186 | if (obj->type == ACPI_TYPE_INTEGER) { | |
187 | *ret = obj->integer.value; | |
188 | } else if (obj->type == ACPI_TYPE_BUFFER) { | |
189 | if (obj->buffer.length <= 8) { | |
190 | for (i = 0; i < obj->buffer.length; i++) | |
191 | *ret |= (obj->buffer.pointer[i] << (i * 8)); | |
192 | } | |
193 | } | |
194 | kfree(buf.pointer); | |
195 | ||
196 | return 0; | |
197 | } | |
198 | ||
199 | static bool extlog_get_l1addr(void) | |
200 | { | |
201 | acpi_handle handle; | |
202 | u64 ret; | |
203 | ||
204 | if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle))) | |
205 | return false; | |
206 | ||
207 | if (extlog_get_dsm(handle, EXTLOG_DSM_REV, EXTLOG_FN_QUERY, &ret) || | |
208 | !(ret & EXTLOG_QUERY_L1_EXIST)) | |
209 | return false; | |
210 | ||
211 | if (extlog_get_dsm(handle, EXTLOG_DSM_REV, EXTLOG_FN_ADDR, &ret)) | |
212 | return false; | |
213 | ||
214 | l1_dirbase = ret; | |
215 | /* Spec says L1 directory must be 4K aligned, bail out if it isn't */ | |
216 | if (l1_dirbase & ((1 << 12) - 1)) { | |
217 | pr_warn(FW_BUG "L1 Directory is invalid at physical %llx\n", | |
218 | l1_dirbase); | |
219 | return false; | |
220 | } | |
221 | ||
222 | return true; | |
223 | } | |
/* Hooked into the MCE decode chain in extlog_init() to report via eMCA */
static struct notifier_block extlog_mce_dec = {
	.notifier_call = extlog_print,
};
227 | ||
/*
 * Probe and take over the firmware eMCA extended error log.
 *
 * Ordering matters: the L1 header is mapped once just to learn the full
 * table and elog sizes, then unmapped and remapped at full size; only
 * after every region is mapped and the bounce buffer allocated do we
 * register with the MCE decode chain and set the OS opt-in flag, so a
 * failure anywhere leaves firmware in control of the log.
 *
 * Returns 0 on success or a negative errno.
 */
static int __init extlog_init(void)
{
	struct extlog_l1_head *l1_head;
	void __iomem *extlog_l1_hdr;
	size_t l1_hdr_size;
	struct resource *r;
	u64 cap;
	int rc;

	/* a force-enabled EDAC reporter takes precedence over eMCA */
	if (get_edac_report_status() == EDAC_REPORTING_FORCE) {
		pr_warn("Not loading eMCA, error reporting force-enabled through EDAC.\n");
		return -EPERM;
	}

	rc = -ENODEV;
	/* MCG_ELOG_P in MCG_CAP advertises extended-error-log support */
	rdmsrl(MSR_IA32_MCG_CAP, cap);
	if (!(cap & MCG_ELOG_P))
		return rc;

	if (!extlog_get_l1addr())
		return rc;

	rc = -EINVAL;
	/* get L1 header to fetch necessary information */
	l1_hdr_size = sizeof(struct extlog_l1_head);
	r = request_mem_region(l1_dirbase, l1_hdr_size, "L1 DIR HDR");
	if (!r) {
		pr_warn(FW_BUG EMCA_BUG,
			(unsigned long long)l1_dirbase,
			(unsigned long long)l1_dirbase + l1_hdr_size);
		goto err;
	}

	/*
	 * NOTE(review): acpi_os_map_memory() can return NULL; the header
	 * fields below are read unchecked — confirm failure handling.
	 */
	extlog_l1_hdr = acpi_os_map_memory(l1_dirbase, l1_hdr_size);
	l1_head = (struct extlog_l1_head *)extlog_l1_hdr;
	l1_size = l1_head->total_len;
	l1_percpu_entry = l1_head->entries;
	elog_base = l1_head->elog_base;
	elog_size = l1_head->elog_len;
	acpi_os_unmap_memory(extlog_l1_hdr, l1_hdr_size);
	release_mem_region(l1_dirbase, l1_hdr_size);

	/* remap L1 header again based on completed information */
	r = request_mem_region(l1_dirbase, l1_size, "L1 Table");
	if (!r) {
		pr_warn(FW_BUG EMCA_BUG,
			(unsigned long long)l1_dirbase,
			(unsigned long long)l1_dirbase + l1_size);
		goto err;
	}
	extlog_l1_addr = acpi_os_map_memory(l1_dirbase, l1_size);
	/* per-cpu entry array starts right after the header */
	l1_entry_base = (u64 *)((u8 *)extlog_l1_addr + l1_hdr_size);

	/* remap elog table */
	r = request_mem_region(elog_base, elog_size, "Elog Table");
	if (!r) {
		pr_warn(FW_BUG EMCA_BUG,
			(unsigned long long)elog_base,
			(unsigned long long)elog_base + elog_size);
		goto err_release_l1_dir;
	}
	elog_addr = acpi_os_map_memory(elog_base, elog_size);

	rc = -ENOMEM;
	/* allocate buffer to save elog record */
	elog_buf = kmalloc(ELOG_ENTRY_LEN, GFP_KERNEL);
	if (elog_buf == NULL)
		goto err_release_elog;

	/*
	 * eMCA event report method has higher priority than EDAC method,
	 * unless EDAC event report method is mandatory.
	 */
	old_edac_report_status = get_edac_report_status();
	set_edac_report_status(EDAC_REPORTING_DISABLED);
	mce_register_decode_chain(&extlog_mce_dec);
	/* enable OS to be involved to take over management from BIOS */
	((struct extlog_l1_head *)extlog_l1_addr)->flags |= FLAG_OS_OPTIN;

	return 0;

err_release_elog:
	if (elog_addr)
		acpi_os_unmap_memory(elog_addr, elog_size);
	release_mem_region(elog_base, elog_size);
err_release_l1_dir:
	if (extlog_l1_addr)
		acpi_os_unmap_memory(extlog_l1_addr, l1_size);
	release_mem_region(l1_dirbase, l1_size);
err:
	pr_warn(FW_BUG "Extended error log disabled because of problems parsing f/w tables\n");
	return rc;
}
321 | ||
322 | static void __exit extlog_exit(void) | |
323 | { | |
42139eb3 | 324 | set_edac_report_status(old_edac_report_status); |
4b3db708 CG |
325 | mce_unregister_decode_chain(&extlog_mce_dec); |
326 | ((struct extlog_l1_head *)extlog_l1_addr)->flags &= ~FLAG_OS_OPTIN; | |
327 | if (extlog_l1_addr) | |
328 | acpi_os_unmap_memory(extlog_l1_addr, l1_size); | |
329 | if (elog_addr) | |
330 | acpi_os_unmap_memory(elog_addr, elog_size); | |
331 | release_mem_region(elog_base, elog_size); | |
332 | release_mem_region(l1_dirbase, l1_size); | |
333 | kfree(elog_buf); | |
334 | } | |
335 | ||
336 | module_init(extlog_init); | |
337 | module_exit(extlog_exit); | |
338 | ||
339 | MODULE_AUTHOR("Chen, Gong <gong.chen@intel.com>"); | |
340 | MODULE_DESCRIPTION("Extended MCA Error Log Driver"); | |
341 | MODULE_LICENSE("GPL"); |