xen/mce: Add mcelog support for Xen platform
[linux-2.6-block.git] / drivers / xen / mcelog.c
CommitLineData
cef12ee5
LJ
1/******************************************************************************
2 * mcelog.c
 3 * Driver for receiving and transferring machine check error information
4 *
5 * Copyright (c) 2012 Intel Corporation
6 * Author: Liu, Jinsong <jinsong.liu@intel.com>
7 * Author: Jiang, Yunhong <yunhong.jiang@intel.com>
8 * Author: Ke, Liping <liping.ke@intel.com>
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License version 2
12 * as published by the Free Software Foundation; or, when distributed
13 * separately from the Linux kernel or incorporated into other
14 * software packages, subject to the following license:
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a copy
17 * of this source file (the "Software"), to deal in the Software without
18 * restriction, including without limitation the rights to use, copy, modify,
19 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
20 * and to permit persons to whom the Software is furnished to do so, subject to
21 * the following conditions:
22 *
23 * The above copyright notice and this permission notice shall be included in
24 * all copies or substantial portions of the Software.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
29 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
30 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
31 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32 * IN THE SOFTWARE.
33 */
34
35#include <linux/init.h>
36#include <linux/types.h>
37#include <linux/kernel.h>
38#include <linux/slab.h>
39#include <linux/fs.h>
40#include <linux/device.h>
41#include <linux/miscdevice.h>
42#include <linux/uaccess.h>
43#include <linux/capability.h>
44
45#include <xen/interface/xen.h>
46#include <xen/events.h>
47#include <xen/interface/vcpu.h>
48#include <xen/xen.h>
49#include <asm/xen/hypercall.h>
50#include <asm/xen/hypervisor.h>
51
#define XEN_MCELOG "xen_mcelog: "

/* Scratch buffer filled by the XEN_MC_fetch hypercall. */
static struct mc_info g_mi;
/* Per-physical-CPU info cached once at init; indexed 0..ncpus-1. */
static struct mcinfo_logical_cpu *g_physinfo;
/* Number of physical CPUs reported by XEN_MC_physcpuinfo. */
static uint32_t ncpus;

/* Serializes xen_mcelog access between the VIRQ handler and readers. */
static DEFINE_SPINLOCK(mcelog_lock);

/* Ring of converted machine check records exposed via /dev/mcelog. */
static struct xen_mce_log xen_mcelog = {
	.signature	= XEN_MCE_LOG_SIGNATURE,
	.len		= XEN_MCE_LOG_LEN,
	.recordlen	= sizeof(struct xen_mce),
};

/* Protects the open/exclusive bookkeeping below. */
static DEFINE_SPINLOCK(xen_mce_chrdev_state_lock);
static int xen_mce_chrdev_open_count;	/* #times opened */
static int xen_mce_chrdev_open_exclu;	/* already open exclusive? */
69
70static int xen_mce_chrdev_open(struct inode *inode, struct file *file)
71{
72 spin_lock(&xen_mce_chrdev_state_lock);
73
74 if (xen_mce_chrdev_open_exclu ||
75 (xen_mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
76 spin_unlock(&xen_mce_chrdev_state_lock);
77
78 return -EBUSY;
79 }
80
81 if (file->f_flags & O_EXCL)
82 xen_mce_chrdev_open_exclu = 1;
83 xen_mce_chrdev_open_count++;
84
85 spin_unlock(&xen_mce_chrdev_state_lock);
86
87 return nonseekable_open(inode, file);
88}
89
90static int xen_mce_chrdev_release(struct inode *inode, struct file *file)
91{
92 spin_lock(&xen_mce_chrdev_state_lock);
93
94 xen_mce_chrdev_open_count--;
95 xen_mce_chrdev_open_exclu = 0;
96
97 spin_unlock(&xen_mce_chrdev_state_lock);
98
99 return 0;
100}
101
/*
 * Read out all logged machine check records and reset the ring.
 *
 * Only whole-buffer reads at offset 0 are supported: the caller
 * (typically the mcelog daemon) must supply a buffer of at least
 * XEN_MCE_LOG_LEN * sizeof(struct xen_mce) bytes.
 *
 * Returns the number of bytes copied, -EINVAL for a partial read
 * request, or -EFAULT if any copy to userspace failed.
 */
static ssize_t xen_mce_chrdev_read(struct file *filp, char __user *ubuf,
				   size_t usize, loff_t *off)
{
	char __user *buf = ubuf;
	unsigned num;
	int i, err;

	spin_lock(&mcelog_lock);

	num = xen_mcelog.next;

	/* Only supports full reads right now */
	err = -EINVAL;
	if (*off != 0 || usize < XEN_MCE_LOG_LEN*sizeof(struct xen_mce))
		goto out;

	err = 0;
	for (i = 0; i < num; i++) {
		struct xen_mce *m = &xen_mcelog.entry[i];

		/*
		 * NOTE(review): copy_to_user() may fault while mcelog_lock
		 * is held, and xen_mce_interrupt() takes this same lock
		 * with spin_lock_irqsave() — worth confirming this pairing
		 * cannot deadlock or sleep in atomic context.
		 */
		err |= copy_to_user(buf, m, sizeof(*m));
		buf += sizeof(*m);
	}

	/* Records consumed: zero them and reset the ring index. */
	memset(xen_mcelog.entry, 0, num * sizeof(struct xen_mce));
	xen_mcelog.next = 0;

	if (err)
		err = -EFAULT;

out:
	spin_unlock(&mcelog_lock);

	return err ? err : buf - ubuf;
}
137
138static long xen_mce_chrdev_ioctl(struct file *f, unsigned int cmd,
139 unsigned long arg)
140{
141 int __user *p = (int __user *)arg;
142
143 if (!capable(CAP_SYS_ADMIN))
144 return -EPERM;
145
146 switch (cmd) {
147 case MCE_GET_RECORD_LEN:
148 return put_user(sizeof(struct xen_mce), p);
149 case MCE_GET_LOG_LEN:
150 return put_user(XEN_MCE_LOG_LEN, p);
151 case MCE_GETCLEAR_FLAGS: {
152 unsigned flags;
153
154 do {
155 flags = xen_mcelog.flags;
156 } while (cmpxchg(&xen_mcelog.flags, flags, 0) != flags);
157
158 return put_user(flags, p);
159 }
160 default:
161 return -ENOTTY;
162 }
163}
164
/* File operations backing the /dev/mcelog character device. */
static const struct file_operations xen_mce_chrdev_ops = {
	.open			= xen_mce_chrdev_open,
	.release		= xen_mce_chrdev_release,
	.read			= xen_mce_chrdev_read,
	.unlocked_ioctl		= xen_mce_chrdev_ioctl,
	.llseek			= no_llseek,
};
172
173static struct miscdevice xen_mce_chrdev_device = {
174 MISC_MCELOG_MINOR,
175 "mcelog",
176 &xen_mce_chrdev_ops,
177};
178
179/*
180 * Caller should hold the mcelog_lock
181 */
182static void xen_mce_log(struct xen_mce *mce)
183{
184 unsigned entry;
185
186 entry = xen_mcelog.next;
187
188 /*
189 * When the buffer fills up discard new entries.
190 * Assume that the earlier errors are the more
191 * interesting ones:
192 */
193 if (entry >= XEN_MCE_LOG_LEN) {
194 set_bit(XEN_MCE_OVERFLOW,
195 (unsigned long *)&xen_mcelog.flags);
196 return;
197 }
198
199 memcpy(xen_mcelog.entry + entry, mce, sizeof(struct xen_mce));
200
201 xen_mcelog.next++;
202}
203
204static int convert_log(struct mc_info *mi)
205{
206 struct mcinfo_common *mic;
207 struct mcinfo_global *mc_global;
208 struct mcinfo_bank *mc_bank;
209 struct xen_mce m;
210 uint32_t i;
211
212 mic = NULL;
213 x86_mcinfo_lookup(&mic, mi, MC_TYPE_GLOBAL);
214 if (unlikely(!mic)) {
215 pr_warning(XEN_MCELOG "Failed to find global error info\n");
216 return -ENODEV;
217 }
218
219 memset(&m, 0, sizeof(struct xen_mce));
220
221 mc_global = (struct mcinfo_global *)mic;
222 m.mcgstatus = mc_global->mc_gstatus;
223 m.apicid = mc_global->mc_apicid;
224
225 for (i = 0; i < ncpus; i++)
226 if (g_physinfo[i].mc_apicid == m.apicid)
227 break;
228 if (unlikely(i == ncpus)) {
229 pr_warning(XEN_MCELOG "Failed to match cpu with apicid %d\n",
230 m.apicid);
231 return -ENODEV;
232 }
233
234 m.socketid = g_physinfo[i].mc_chipid;
235 m.cpu = m.extcpu = g_physinfo[i].mc_cpunr;
236 m.cpuvendor = (__u8)g_physinfo[i].mc_vendor;
237 m.mcgcap = g_physinfo[i].mc_msrvalues[__MC_MSR_MCGCAP].value;
238
239 mic = NULL;
240 x86_mcinfo_lookup(&mic, mi, MC_TYPE_BANK);
241 if (unlikely(!mic)) {
242 pr_warning(XEN_MCELOG "Fail to find bank error info\n");
243 return -ENODEV;
244 }
245
246 do {
247 if ((!mic) || (mic->size == 0) ||
248 (mic->type != MC_TYPE_GLOBAL &&
249 mic->type != MC_TYPE_BANK &&
250 mic->type != MC_TYPE_EXTENDED &&
251 mic->type != MC_TYPE_RECOVERY))
252 break;
253
254 if (mic->type == MC_TYPE_BANK) {
255 mc_bank = (struct mcinfo_bank *)mic;
256 m.misc = mc_bank->mc_misc;
257 m.status = mc_bank->mc_status;
258 m.addr = mc_bank->mc_addr;
259 m.tsc = mc_bank->mc_tsc;
260 m.bank = mc_bank->mc_bank;
261 m.finished = 1;
262 /*log this record*/
263 xen_mce_log(&m);
264 }
265 mic = x86_mcinfo_next(mic);
266 } while (1);
267
268 return 0;
269}
270
271static int mc_queue_handle(uint32_t flags)
272{
273 struct xen_mc mc_op;
274 int ret = 0;
275
276 mc_op.cmd = XEN_MC_fetch;
277 mc_op.interface_version = XEN_MCA_INTERFACE_VERSION;
278 set_xen_guest_handle(mc_op.u.mc_fetch.data, &g_mi);
279 do {
280 mc_op.u.mc_fetch.flags = flags;
281 ret = HYPERVISOR_mca(&mc_op);
282 if (ret) {
283 pr_err(XEN_MCELOG "Failed to fetch %s error log\n",
284 (flags == XEN_MC_URGENT) ?
285 "urgnet" : "nonurgent");
286 break;
287 }
288
289 if (mc_op.u.mc_fetch.flags & XEN_MC_NODATA ||
290 mc_op.u.mc_fetch.flags & XEN_MC_FETCHFAILED)
291 break;
292 else {
293 ret = convert_log(&g_mi);
294 if (ret)
295 pr_warning(XEN_MCELOG
296 "Failed to convert this error log, "
297 "continue acking it anyway\n");
298
299 mc_op.u.mc_fetch.flags = flags | XEN_MC_ACK;
300 ret = HYPERVISOR_mca(&mc_op);
301 if (ret) {
302 pr_err(XEN_MCELOG
303 "Failed to ack previous error log\n");
304 break;
305 }
306 }
307 } while (1);
308
309 return ret;
310}
311
312/* virq handler for machine check error info*/
313static irqreturn_t xen_mce_interrupt(int irq, void *dev_id)
314{
315 int err;
316 unsigned long tmp;
317
318 spin_lock_irqsave(&mcelog_lock, tmp);
319
320 /* urgent mc_info */
321 err = mc_queue_handle(XEN_MC_URGENT);
322 if (err)
323 pr_err(XEN_MCELOG
324 "Failed to handle urgent mc_info queue, "
325 "continue handling nonurgent mc_info queue anyway.\n");
326
327 /* nonurgent mc_info */
328 err = mc_queue_handle(XEN_MC_NONURGENT);
329 if (err)
330 pr_err(XEN_MCELOG
331 "Failed to handle nonurgent mc_info queue.\n");
332
333 spin_unlock_irqrestore(&mcelog_lock, tmp);
334
335 return IRQ_HANDLED;
336}
337
338static int bind_virq_for_mce(void)
339{
340 int ret;
341 struct xen_mc mc_op;
342
343 memset(&mc_op, 0, sizeof(struct xen_mc));
344
345 /* Fetch physical CPU Numbers */
346 mc_op.cmd = XEN_MC_physcpuinfo;
347 mc_op.interface_version = XEN_MCA_INTERFACE_VERSION;
348 set_xen_guest_handle(mc_op.u.mc_physcpuinfo.info, g_physinfo);
349 ret = HYPERVISOR_mca(&mc_op);
350 if (ret) {
351 pr_err(XEN_MCELOG "Failed to get CPU numbers\n");
352 return ret;
353 }
354
355 /* Fetch each CPU Physical Info for later reference*/
356 ncpus = mc_op.u.mc_physcpuinfo.ncpus;
357 g_physinfo = kcalloc(ncpus, sizeof(struct mcinfo_logical_cpu),
358 GFP_KERNEL);
359 if (!g_physinfo)
360 return -ENOMEM;
361 set_xen_guest_handle(mc_op.u.mc_physcpuinfo.info, g_physinfo);
362 ret = HYPERVISOR_mca(&mc_op);
363 if (ret) {
364 pr_err(XEN_MCELOG "Failed to get CPU info\n");
365 kfree(g_physinfo);
366 return ret;
367 }
368
369 ret = bind_virq_to_irqhandler(VIRQ_MCA, 0,
370 xen_mce_interrupt, 0, "mce", NULL);
371 if (ret < 0) {
372 pr_err(XEN_MCELOG "Failed to bind virq\n");
373 kfree(g_physinfo);
374 return ret;
375 }
376
377 return 0;
378}
379
380static int __init xen_late_init_mcelog(void)
381{
382 /* Only DOM0 is responsible for MCE logging */
383 if (xen_initial_domain()) {
384 /* register character device /dev/mcelog for xen mcelog */
385 if (misc_register(&xen_mce_chrdev_device))
386 return -ENODEV;
387 return bind_virq_for_mce();
388 }
389
390 return -ENODEV;
391}
392device_initcall(xen_late_init_mcelog);