Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * eeh.c | |
3 | * Copyright (C) 2001 Dave Engebretsen & Todd Inglett IBM Corporation | |
69376502 | 4 | * |
1da177e4 LT |
5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | |
7 | * the Free Software Foundation; either version 2 of the License, or | |
8 | * (at your option) any later version. | |
69376502 | 9 | * |
1da177e4 LT |
10 | * This program is distributed in the hope that it will be useful, |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
13 | * GNU General Public License for more details. | |
69376502 | 14 | * |
1da177e4 LT |
15 | * You should have received a copy of the GNU General Public License |
16 | * along with this program; if not, write to the Free Software | |
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |
18 | */ | |
19 | ||
6dee3fb9 | 20 | #include <linux/delay.h> |
1da177e4 LT |
21 | #include <linux/init.h> |
22 | #include <linux/list.h> | |
1da177e4 LT |
23 | #include <linux/pci.h> |
24 | #include <linux/proc_fs.h> | |
25 | #include <linux/rbtree.h> | |
26 | #include <linux/seq_file.h> | |
27 | #include <linux/spinlock.h> | |
69376502 | 28 | #include <asm/atomic.h> |
1da177e4 | 29 | #include <asm/eeh.h> |
172ca926 | 30 | #include <asm/eeh_event.h> |
1da177e4 LT |
31 | #include <asm/io.h> |
32 | #include <asm/machdep.h> | |
172ca926 | 33 | #include <asm/ppc-pci.h> |
1da177e4 | 34 | #include <asm/rtas.h> |
1da177e4 | 35 | #include <asm/systemcfg.h> |
1da177e4 LT |
36 | |
37 | #undef DEBUG | |
38 | ||
39 | /** Overview: | |
40 | * EEH, or "Extended Error Handling" is a PCI bridge technology for | |
41 | * dealing with PCI bus errors that can't be dealt with within the | |
42 | * usual PCI framework, except by check-stopping the CPU. Systems | |
43 | * that are designed for high-availability/reliability cannot afford | |
44 | * to crash due to a "mere" PCI error, thus the need for EEH. | |
45 | * An EEH-capable bridge operates by converting a detected error | |
46 | * into a "slot freeze", taking the PCI adapter off-line, making | |
47 | * the slot behave, from the OS'es point of view, as if the slot | |
48 | * were "empty": all reads return 0xff's and all writes are silently | |
49 | * ignored. EEH slot isolation events can be triggered by parity | |
50 | * errors on the address or data busses (e.g. during posted writes), | |
69376502 LV |
51 | * which in turn might be caused by low voltage on the bus, dust, |
52 | * vibration, humidity, radioactivity or plain-old failed hardware. | |
1da177e4 LT |
53 | * |
54 | * Note, however, that one of the leading causes of EEH slot | |
55 | * freeze events are buggy device drivers, buggy device microcode, | |
56 | * or buggy device hardware. This is because any attempt by the | |
57 | * device to bus-master data to a memory address that is not | |
58 | * assigned to the device will trigger a slot freeze. (The idea | |
59 | * is to prevent devices-gone-wild from corrupting system memory). | |
60 | * Buggy hardware/drivers will have a miserable time co-existing | |
61 | * with EEH. | |
62 | * | |
63 | * Ideally, a PCI device driver, when suspecting that an isolation | |
64 | * event has occured (e.g. by reading 0xff's), will then ask EEH | |
65 | * whether this is the case, and then take appropriate steps to | |
66 | * reset the PCI slot, the PCI device, and then resume operations. | |
67 | * However, until that day, the checking is done here, with the | |
68 | * eeh_check_failure() routine embedded in the MMIO macros. If | |
69 | * the slot is found to be isolated, an "EEH Event" is synthesized | |
70 | * and sent out for processing. | |
71 | */ | |
72 | ||
5c1344e9 | 73 | /* If a device driver keeps reading an MMIO register in an interrupt |
1da177e4 LT |
74 | * handler after a slot isolation event has occurred, we assume it |
75 | * is broken and panic. This sets the threshold for how many read | |
76 | * attempts we allow before panicking. | |
77 | */ | |
5c1344e9 | 78 | #define EEH_MAX_FAILS 100000 |
1da177e4 LT |
79 | |
80 | /* RTAS tokens */ | |
81 | static int ibm_set_eeh_option; | |
82 | static int ibm_set_slot_reset; | |
83 | static int ibm_read_slot_reset_state; | |
84 | static int ibm_read_slot_reset_state2; | |
85 | static int ibm_slot_error_detail; | |
86 | ||
87 | static int eeh_subsystem_enabled; | |
88 | ||
fd761fd8 LV |
89 | /* Lock to avoid races due to multiple reports of an error */ |
90 | static DEFINE_SPINLOCK(confirm_error_lock); | |
91 | ||
1da177e4 LT |
92 | /* Buffer for reporting slot-error-detail rtas calls */ |
93 | static unsigned char slot_errbuf[RTAS_ERROR_LOG_MAX]; | |
94 | static DEFINE_SPINLOCK(slot_errbuf_lock); | |
95 | static int eeh_error_buf_size; | |
96 | ||
97 | /* System monitoring statistics */ | |
177bc936 LV |
98 | static DEFINE_PER_CPU(unsigned long, no_device); |
99 | static DEFINE_PER_CPU(unsigned long, no_dn); | |
100 | static DEFINE_PER_CPU(unsigned long, no_cfg_addr); | |
101 | static DEFINE_PER_CPU(unsigned long, ignored_check); | |
1da177e4 LT |
102 | static DEFINE_PER_CPU(unsigned long, total_mmio_ffs); |
103 | static DEFINE_PER_CPU(unsigned long, false_positives); | |
104 | static DEFINE_PER_CPU(unsigned long, ignored_failures); | |
105 | static DEFINE_PER_CPU(unsigned long, slot_resets); | |
106 | ||
107 | /** | |
108 | * The pci address cache subsystem. This subsystem places | |
109 | * PCI device address resources into a red-black tree, sorted | |
110 | * according to the address range, so that given only an i/o | |
111 | * address, the corresponding PCI device can be **quickly** | |
112 | * found. It is safe to perform an address lookup in an interrupt | |
113 | * context; this ability is an important feature. | |
114 | * | |
115 | * Currently, the only customer of this code is the EEH subsystem; | |
116 | * thus, this code has been somewhat tailored to suit EEH better. | |
117 | * In particular, the cache does *not* hold the addresses of devices | |
118 | * for which EEH is not enabled. | |
119 | * | |
120 | * (Implementation Note: The RB tree seems to be better/faster | |
121 | * than any hash algo I could think of for this problem, even | |
122 | * with the penalty of slow pointer chases for d-cache misses). | |
123 | */ | |
124 | struct pci_io_addr_range | |
125 | { | |
126 | struct rb_node rb_node; | |
127 | unsigned long addr_lo; | |
128 | unsigned long addr_hi; | |
129 | struct pci_dev *pcidev; | |
130 | unsigned int flags; | |
131 | }; | |
132 | ||
133 | static struct pci_io_addr_cache | |
134 | { | |
135 | struct rb_root rb_root; | |
136 | spinlock_t piar_lock; | |
137 | } pci_io_addr_cache_root; | |
138 | ||
139 | static inline struct pci_dev *__pci_get_device_by_addr(unsigned long addr) | |
140 | { | |
141 | struct rb_node *n = pci_io_addr_cache_root.rb_root.rb_node; | |
142 | ||
143 | while (n) { | |
144 | struct pci_io_addr_range *piar; | |
145 | piar = rb_entry(n, struct pci_io_addr_range, rb_node); | |
146 | ||
147 | if (addr < piar->addr_lo) { | |
148 | n = n->rb_left; | |
149 | } else { | |
150 | if (addr > piar->addr_hi) { | |
151 | n = n->rb_right; | |
152 | } else { | |
153 | pci_dev_get(piar->pcidev); | |
154 | return piar->pcidev; | |
155 | } | |
156 | } | |
157 | } | |
158 | ||
159 | return NULL; | |
160 | } | |
161 | ||
162 | /** | |
163 | * pci_get_device_by_addr - Get device, given only address | |
164 | * @addr: mmio (PIO) phys address or i/o port number | |
165 | * | |
166 | * Given an mmio phys address, or a port number, find a pci device | |
167 | * that implements this address. Be sure to pci_dev_put the device | |
168 | * when finished. I/O port numbers are assumed to be offset | |
169 | * from zero (that is, they do *not* have pci_io_addr added in). | |
170 | * It is safe to call this function within an interrupt. | |
171 | */ | |
172 | static struct pci_dev *pci_get_device_by_addr(unsigned long addr) | |
173 | { | |
174 | struct pci_dev *dev; | |
175 | unsigned long flags; | |
176 | ||
177 | spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags); | |
178 | dev = __pci_get_device_by_addr(addr); | |
179 | spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags); | |
180 | return dev; | |
181 | } | |
182 | ||
#ifdef DEBUG
/*
 * Debug helper: walk the whole address cache in sorted order and
 * print every cached range together with its owning device.
 */
static void pci_addr_cache_print(struct pci_io_addr_cache *cache)
{
	struct rb_node *node;
	int idx = 0;

	for (node = rb_first(&cache->rb_root); node; node = rb_next(node)) {
		struct pci_io_addr_range *piar =
			rb_entry(node, struct pci_io_addr_range, rb_node);

		printk(KERN_DEBUG "PCI: %s addr range %d [%lx-%lx]: %s\n",
		       (piar->flags & IORESOURCE_IO) ? "i/o" : "mem", idx,
		       piar->addr_lo, piar->addr_hi, pci_name(piar->pcidev));
		idx++;
	}
}
#endif
205 | ||
206 | /* Insert address range into the rb tree. */ | |
207 | static struct pci_io_addr_range * | |
208 | pci_addr_cache_insert(struct pci_dev *dev, unsigned long alo, | |
209 | unsigned long ahi, unsigned int flags) | |
210 | { | |
211 | struct rb_node **p = &pci_io_addr_cache_root.rb_root.rb_node; | |
212 | struct rb_node *parent = NULL; | |
213 | struct pci_io_addr_range *piar; | |
214 | ||
215 | /* Walk tree, find a place to insert into tree */ | |
216 | while (*p) { | |
217 | parent = *p; | |
218 | piar = rb_entry(parent, struct pci_io_addr_range, rb_node); | |
56b0fca3 | 219 | if (ahi < piar->addr_lo) { |
1da177e4 | 220 | p = &parent->rb_left; |
56b0fca3 | 221 | } else if (alo > piar->addr_hi) { |
1da177e4 LT |
222 | p = &parent->rb_right; |
223 | } else { | |
224 | if (dev != piar->pcidev || | |
225 | alo != piar->addr_lo || ahi != piar->addr_hi) { | |
226 | printk(KERN_WARNING "PIAR: overlapping address range\n"); | |
227 | } | |
228 | return piar; | |
229 | } | |
230 | } | |
231 | piar = (struct pci_io_addr_range *)kmalloc(sizeof(struct pci_io_addr_range), GFP_ATOMIC); | |
232 | if (!piar) | |
233 | return NULL; | |
234 | ||
235 | piar->addr_lo = alo; | |
236 | piar->addr_hi = ahi; | |
237 | piar->pcidev = dev; | |
238 | piar->flags = flags; | |
239 | ||
56b0fca3 LV |
240 | #ifdef DEBUG |
241 | printk(KERN_DEBUG "PIAR: insert range=[%lx:%lx] dev=%s\n", | |
242 | alo, ahi, pci_name (dev)); | |
243 | #endif | |
244 | ||
1da177e4 LT |
245 | rb_link_node(&piar->rb_node, parent, p); |
246 | rb_insert_color(&piar->rb_node, &pci_io_addr_cache_root.rb_root); | |
247 | ||
248 | return piar; | |
249 | } | |
250 | ||
/* Insert all usable BAR ranges of @dev into the address cache.
 * Caller holds piar_lock.  The cache takes one pci_dev reference
 * per device (not per range); the reference is dropped again if no
 * range was actually inserted.  Devices without a device node or
 * without EEH support are silently skipped. */
static void __pci_addr_cache_insert_device(struct pci_dev *dev)
{
	struct device_node *dn;
	struct pci_dn *pdn;
	int i;
	int inserted = 0;	/* did we cache at least one range? */

	dn = pci_device_to_OF_node(dev);
	if (!dn) {
		printk(KERN_WARNING "PCI: no pci dn found for dev=%s\n", pci_name(dev));
		return;
	}

	/* Skip any devices for which EEH is not enabled. */
	pdn = PCI_DN(dn);
	if (!(pdn->eeh_mode & EEH_MODE_SUPPORTED) ||
	    pdn->eeh_mode & EEH_MODE_NOCHECK) {
#ifdef DEBUG
		printk(KERN_INFO "PCI: skip building address cache for=%s - %s\n",
		       pci_name(dev), pdn->node->full_name);
#endif
		return;
	}

	/* The cache holds a reference to the device... */
	pci_dev_get(dev);

	/* Walk resources on this device, poke them into the tree */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		unsigned long start = pci_resource_start(dev,i);
		unsigned long end = pci_resource_end(dev,i);
		unsigned int flags = pci_resource_flags(dev,i);

		/* We are interested only bus addresses, not dma or other stuff */
		if (0 == (flags & (IORESOURCE_IO | IORESOURCE_MEM)))
			continue;
		/* Skip unassigned/degenerate BARs (0 or all-ones). */
		if (start == 0 || ~start == 0 || end == 0 || ~end == 0)
			continue;
		pci_addr_cache_insert(dev, start, end, flags);
		inserted = 1;
	}

	/* If there was nothing to add, the cache has no reference... */
	if (!inserted)
		pci_dev_put(dev);
}
297 | ||
298 | /** | |
299 | * pci_addr_cache_insert_device - Add a device to the address cache | |
300 | * @dev: PCI device whose I/O addresses we are interested in. | |
301 | * | |
302 | * In order to support the fast lookup of devices based on addresses, | |
303 | * we maintain a cache of devices that can be quickly searched. | |
304 | * This routine adds a device to that cache. | |
305 | */ | |
56b0fca3 | 306 | static void pci_addr_cache_insert_device(struct pci_dev *dev) |
1da177e4 LT |
307 | { |
308 | unsigned long flags; | |
309 | ||
310 | spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags); | |
311 | __pci_addr_cache_insert_device(dev); | |
312 | spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags); | |
313 | } | |
314 | ||
315 | static inline void __pci_addr_cache_remove_device(struct pci_dev *dev) | |
316 | { | |
317 | struct rb_node *n; | |
318 | int removed = 0; | |
319 | ||
320 | restart: | |
321 | n = rb_first(&pci_io_addr_cache_root.rb_root); | |
322 | while (n) { | |
323 | struct pci_io_addr_range *piar; | |
324 | piar = rb_entry(n, struct pci_io_addr_range, rb_node); | |
325 | ||
326 | if (piar->pcidev == dev) { | |
327 | rb_erase(n, &pci_io_addr_cache_root.rb_root); | |
328 | removed = 1; | |
329 | kfree(piar); | |
330 | goto restart; | |
331 | } | |
332 | n = rb_next(n); | |
333 | } | |
334 | ||
335 | /* The cache no longer holds its reference to this device... */ | |
336 | if (removed) | |
337 | pci_dev_put(dev); | |
338 | } | |
339 | ||
340 | /** | |
341 | * pci_addr_cache_remove_device - remove pci device from addr cache | |
342 | * @dev: device to remove | |
343 | * | |
344 | * Remove a device from the addr-cache tree. | |
345 | * This is potentially expensive, since it will walk | |
346 | * the tree multiple times (once per resource). | |
347 | * But so what; device removal doesn't need to be that fast. | |
348 | */ | |
56b0fca3 | 349 | static void pci_addr_cache_remove_device(struct pci_dev *dev) |
1da177e4 LT |
350 | { |
351 | unsigned long flags; | |
352 | ||
353 | spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags); | |
354 | __pci_addr_cache_remove_device(dev); | |
355 | spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags); | |
356 | } | |
357 | ||
358 | /** | |
359 | * pci_addr_cache_build - Build a cache of I/O addresses | |
360 | * | |
361 | * Build a cache of pci i/o addresses. This cache will be used to | |
362 | * find the pci device that corresponds to a given address. | |
363 | * This routine scans all pci busses to build the cache. | |
364 | * Must be run late in boot process, after the pci controllers | |
365 | * have been scaned for devices (after all device resources are known). | |
366 | */ | |
367 | void __init pci_addr_cache_build(void) | |
368 | { | |
369 | struct pci_dev *dev = NULL; | |
370 | ||
56b0fca3 LV |
371 | if (!eeh_subsystem_enabled) |
372 | return; | |
373 | ||
1da177e4 LT |
374 | spin_lock_init(&pci_io_addr_cache_root.piar_lock); |
375 | ||
376 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { | |
377 | /* Ignore PCI bridges ( XXX why ??) */ | |
378 | if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) { | |
379 | continue; | |
380 | } | |
381 | pci_addr_cache_insert_device(dev); | |
382 | } | |
383 | ||
384 | #ifdef DEBUG | |
385 | /* Verify tree built up above, echo back the list of addrs. */ | |
386 | pci_addr_cache_print(&pci_io_addr_cache_root); | |
387 | #endif | |
388 | } | |
389 | ||
390 | /* --------------------------------------------------------------- */ | |
391 | /* Above lies the PCI Address Cache. Below lies the EEH event infrastructure */ | |
392 | ||
df7242b1 LV |
/**
 * eeh_slot_error_detail - fetch and log the slot error log from firmware
 * @pdn: pci device node that reported the error
 * @severity: severity code passed through to the RTAS call
 *
 * Calls the ibm,slot-error-detail RTAS service to retrieve the
 * extended error log for the frozen slot into the shared static
 * buffer (hence the slot_errbuf_lock), and forwards it to the
 * RTAS error logger on success.
 */
void eeh_slot_error_detail (struct pci_dn *pdn, int severity)
{
	unsigned long flags;
	int rc;

	/* Log the error with the rtas logger */
	spin_lock_irqsave(&slot_errbuf_lock, flags);
	memset(slot_errbuf, 0, eeh_error_buf_size);

	/* ibm,slot-error-detail takes 8 inputs, 1 output; the buffer
	 * is passed by physical address. */
	rc = rtas_call(ibm_slot_error_detail,
	               8, 1, NULL, pdn->eeh_config_addr,
	               BUID_HI(pdn->phb->buid),
	               BUID_LO(pdn->phb->buid), NULL, 0,
	               virt_to_phys(slot_errbuf),
	               eeh_error_buf_size,
	               severity);

	if (rc == 0)
		log_error(slot_errbuf, ERR_TYPE_RTAS_LOG, 0);
	spin_unlock_irqrestore(&slot_errbuf_lock, flags);
}
414 | ||
1da177e4 LT |
/**
 * read_slot_reset_state - Read the reset state of a device node's slot
 * @pdn: pci device node to read
 * @rets: array to return results in
 *
 * Prefers the newer ibm,read-slot-reset-state2 RTAS call when the
 * firmware provides it (4 outputs, including PE-unavailable info in
 * rets[2]); otherwise falls back to ibm,read-slot-reset-state
 * (3 outputs) and fakes rets[2] as 0.
 *
 * Returns the rtas_call() status; on success the slot state is
 * in rets[0] and the EEH-capable flag in rets[1].
 */
static int read_slot_reset_state(struct pci_dn *pdn, int rets[])
{
	int token, outputs;

	if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) {
		token = ibm_read_slot_reset_state2;
		outputs = 4;
	} else {
		token = ibm_read_slot_reset_state;
		rets[2] = 0; /* fake PE Unavailable info */
		outputs = 3;
	}

	return rtas_call(token, 3, outputs, rets, pdn->eeh_config_addr,
			 BUID_HI(pdn->phb->buid), BUID_LO(pdn->phb->buid));
}
436 | ||
1da177e4 LT |
437 | /** |
438 | * eeh_token_to_phys - convert EEH address token to phys address | |
69376502 | 439 | * @token i/o token, should be address in the form 0xA.... |
1da177e4 LT |
440 | */ |
441 | static inline unsigned long eeh_token_to_phys(unsigned long token) | |
442 | { | |
443 | pte_t *ptep; | |
444 | unsigned long pa; | |
445 | ||
20cee16c | 446 | ptep = find_linux_pte(init_mm.pgd, token); |
1da177e4 LT |
447 | if (!ptep) |
448 | return token; | |
449 | pa = pte_pfn(*ptep) << PAGE_SHIFT; | |
450 | ||
451 | return pa | (token & (PAGE_SIZE-1)); | |
452 | } | |
453 | ||
fd761fd8 LV |
454 | /** |
455 | * Return the "partitionable endpoint" (pe) under which this device lies | |
456 | */ | |
457 | static struct device_node * find_device_pe(struct device_node *dn) | |
458 | { | |
459 | while ((dn->parent) && PCI_DN(dn->parent) && | |
460 | (PCI_DN(dn->parent)->eeh_mode & EEH_MODE_SUPPORTED)) { | |
461 | dn = dn->parent; | |
462 | } | |
463 | return dn; | |
464 | } | |
465 | ||
466 | /** Mark all devices that are peers of this device as failed. | |
467 | * Mark the device driver too, so that it can see the failure | |
468 | * immediately; this is critical, since some drivers poll | |
469 | * status registers in interrupts ... If a driver is polling, | |
470 | * and the slot is frozen, then the driver can deadlock in | |
471 | * an interrupt context, which is bad. | |
472 | */ | |
473 | ||
474 | static inline void __eeh_mark_slot (struct device_node *dn) | |
475 | { | |
476 | while (dn) { | |
477 | PCI_DN(dn)->eeh_mode |= EEH_MODE_ISOLATED; | |
478 | ||
479 | if (dn->child) | |
480 | __eeh_mark_slot (dn->child); | |
481 | dn = dn->sibling; | |
482 | } | |
483 | } | |
484 | ||
485 | static inline void __eeh_clear_slot (struct device_node *dn) | |
486 | { | |
487 | while (dn) { | |
488 | PCI_DN(dn)->eeh_mode &= ~EEH_MODE_ISOLATED; | |
489 | if (dn->child) | |
490 | __eeh_clear_slot (dn->child); | |
491 | dn = dn->sibling; | |
492 | } | |
493 | } | |
494 | ||
/* Clear the ISOLATED mark on a slot subtree, under confirm_error_lock
 * so it cannot race with eeh_dn_check_failure() marking the slot. */
static inline void eeh_clear_slot (struct device_node *dn)
{
	unsigned long flags;
	spin_lock_irqsave(&confirm_error_lock, flags);
	__eeh_clear_slot (dn);
	spin_unlock_irqrestore(&confirm_error_lock, flags);
}
502 | ||
1da177e4 LT |
503 | /** |
504 | * eeh_dn_check_failure - check if all 1's data is due to EEH slot freeze | |
505 | * @dn device node | |
506 | * @dev pci device, if known | |
507 | * | |
508 | * Check for an EEH failure for the given device node. Call this | |
509 | * routine if the result of a read was all 0xff's and you want to | |
510 | * find out if this is due to an EEH slot freeze. This routine | |
511 | * will query firmware for the EEH status. | |
512 | * | |
513 | * Returns 0 if there has not been an EEH error; otherwise returns | |
69376502 | 514 | * a non-zero value and queues up a slot isolation event notification. |
1da177e4 LT |
515 | * |
516 | * It is safe to call this routine in an interrupt context. | |
517 | */ | |
518 | int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev) | |
519 | { | |
520 | int ret; | |
521 | int rets[3]; | |
522 | unsigned long flags; | |
1635317f | 523 | struct pci_dn *pdn; |
fd761fd8 LV |
524 | struct device_node *pe_dn; |
525 | int rc = 0; | |
1da177e4 LT |
526 | |
527 | __get_cpu_var(total_mmio_ffs)++; | |
528 | ||
529 | if (!eeh_subsystem_enabled) | |
530 | return 0; | |
531 | ||
177bc936 LV |
532 | if (!dn) { |
533 | __get_cpu_var(no_dn)++; | |
1da177e4 | 534 | return 0; |
177bc936 | 535 | } |
69376502 | 536 | pdn = PCI_DN(dn); |
1da177e4 LT |
537 | |
538 | /* Access to IO BARs might get this far and still not want checking. */ | |
f8632c82 | 539 | if (!(pdn->eeh_mode & EEH_MODE_SUPPORTED) || |
1635317f | 540 | pdn->eeh_mode & EEH_MODE_NOCHECK) { |
177bc936 LV |
541 | __get_cpu_var(ignored_check)++; |
542 | #ifdef DEBUG | |
f8632c82 LV |
543 | printk ("EEH:ignored check (%x) for %s %s\n", |
544 | pdn->eeh_mode, pci_name (dev), dn->full_name); | |
177bc936 | 545 | #endif |
1da177e4 LT |
546 | return 0; |
547 | } | |
548 | ||
1635317f | 549 | if (!pdn->eeh_config_addr) { |
177bc936 | 550 | __get_cpu_var(no_cfg_addr)++; |
1da177e4 LT |
551 | return 0; |
552 | } | |
553 | ||
fd761fd8 LV |
554 | /* If we already have a pending isolation event for this |
555 | * slot, we know it's bad already, we don't need to check. | |
556 | * Do this checking under a lock; as multiple PCI devices | |
557 | * in one slot might report errors simultaneously, and we | |
558 | * only want one error recovery routine running. | |
1da177e4 | 559 | */ |
fd761fd8 LV |
560 | spin_lock_irqsave(&confirm_error_lock, flags); |
561 | rc = 1; | |
1635317f | 562 | if (pdn->eeh_mode & EEH_MODE_ISOLATED) { |
5c1344e9 LV |
563 | pdn->eeh_check_count ++; |
564 | if (pdn->eeh_check_count >= EEH_MAX_FAILS) { | |
565 | printk (KERN_ERR "EEH: Device driver ignored %d bad reads, panicing\n", | |
566 | pdn->eeh_check_count); | |
567 | dump_stack(); | |
568 | ||
1da177e4 | 569 | /* re-read the slot reset state */ |
69376502 | 570 | if (read_slot_reset_state(pdn, rets) != 0) |
1da177e4 | 571 | rets[0] = -1; /* reset state unknown */ |
5c1344e9 LV |
572 | |
573 | /* If we are here, then we hit an infinite loop. Stop. */ | |
574 | panic("EEH: MMIO halt (%d) on device:%s\n", rets[0], pci_name(dev)); | |
1da177e4 | 575 | } |
fd761fd8 | 576 | goto dn_unlock; |
1da177e4 LT |
577 | } |
578 | ||
579 | /* | |
580 | * Now test for an EEH failure. This is VERY expensive. | |
581 | * Note that the eeh_config_addr may be a parent device | |
582 | * in the case of a device behind a bridge, or it may be | |
583 | * function zero of a multi-function device. | |
584 | * In any case they must share a common PHB. | |
585 | */ | |
69376502 | 586 | ret = read_slot_reset_state(pdn, rets); |
76e6faf7 LV |
587 | |
588 | /* If the call to firmware failed, punt */ | |
589 | if (ret != 0) { | |
590 | printk(KERN_WARNING "EEH: read_slot_reset_state() failed; rc=%d dn=%s\n", | |
591 | ret, dn->full_name); | |
592 | __get_cpu_var(false_positives)++; | |
fd761fd8 LV |
593 | rc = 0; |
594 | goto dn_unlock; | |
76e6faf7 LV |
595 | } |
596 | ||
597 | /* If EEH is not supported on this device, punt. */ | |
598 | if (rets[1] != 1) { | |
599 | printk(KERN_WARNING "EEH: event on unsupported device, rc=%d dn=%s\n", | |
600 | ret, dn->full_name); | |
601 | __get_cpu_var(false_positives)++; | |
fd761fd8 LV |
602 | rc = 0; |
603 | goto dn_unlock; | |
76e6faf7 LV |
604 | } |
605 | ||
606 | /* If not the kind of error we know about, punt. */ | |
607 | if (rets[0] != 2 && rets[0] != 4 && rets[0] != 5) { | |
608 | __get_cpu_var(false_positives)++; | |
fd761fd8 LV |
609 | rc = 0; |
610 | goto dn_unlock; | |
76e6faf7 LV |
611 | } |
612 | ||
613 | /* Note that config-io to empty slots may fail; | |
614 | * we recognize empty because they don't have children. */ | |
615 | if ((rets[0] == 5) && (dn->child == NULL)) { | |
1da177e4 | 616 | __get_cpu_var(false_positives)++; |
fd761fd8 LV |
617 | rc = 0; |
618 | goto dn_unlock; | |
1da177e4 LT |
619 | } |
620 | ||
fd761fd8 LV |
621 | __get_cpu_var(slot_resets)++; |
622 | ||
623 | /* Avoid repeated reports of this failure, including problems | |
624 | * with other functions on this device, and functions under | |
625 | * bridges. */ | |
626 | pe_dn = find_device_pe (dn); | |
627 | __eeh_mark_slot (pe_dn); | |
628 | spin_unlock_irqrestore(&confirm_error_lock, flags); | |
1da177e4 | 629 | |
172ca926 LV |
630 | eeh_send_failure_event (dn, dev, rets[0], rets[2]); |
631 | ||
1da177e4 LT |
632 | /* Most EEH events are due to device driver bugs. Having |
633 | * a stack trace will help the device-driver authors figure | |
634 | * out what happened. So print that out. */ | |
76e6faf7 | 635 | if (rets[0] != 5) dump_stack(); |
fd761fd8 LV |
636 | return 1; |
637 | ||
638 | dn_unlock: | |
639 | spin_unlock_irqrestore(&confirm_error_lock, flags); | |
640 | return rc; | |
1da177e4 LT |
641 | } |
642 | ||
fd761fd8 | 643 | EXPORT_SYMBOL_GPL(eeh_dn_check_failure); |
1da177e4 LT |
644 | |
645 | /** | |
646 | * eeh_check_failure - check if all 1's data is due to EEH slot freeze | |
647 | * @token i/o token, should be address in the form 0xA.... | |
648 | * @val value, should be all 1's (XXX why do we need this arg??) | |
649 | * | |
1da177e4 LT |
650 | * Check for an EEH failure at the given token address. Call this |
651 | * routine if the result of a read was all 0xff's and you want to | |
652 | * find out if this is due to an EEH slot freeze event. This routine | |
653 | * will query firmware for the EEH status. | |
654 | * | |
655 | * Note this routine is safe to call in an interrupt context. | |
656 | */ | |
657 | unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val) | |
658 | { | |
659 | unsigned long addr; | |
660 | struct pci_dev *dev; | |
661 | struct device_node *dn; | |
662 | ||
663 | /* Finding the phys addr + pci device; this is pretty quick. */ | |
664 | addr = eeh_token_to_phys((unsigned long __force) token); | |
665 | dev = pci_get_device_by_addr(addr); | |
177bc936 LV |
666 | if (!dev) { |
667 | __get_cpu_var(no_device)++; | |
1da177e4 | 668 | return val; |
177bc936 | 669 | } |
1da177e4 LT |
670 | |
671 | dn = pci_device_to_OF_node(dev); | |
672 | eeh_dn_check_failure (dn, dev); | |
673 | ||
674 | pci_dev_put(dev); | |
675 | return val; | |
676 | } | |
677 | ||
678 | EXPORT_SYMBOL(eeh_check_failure); | |
679 | ||
6dee3fb9 LV |
680 | /* ------------------------------------------------------------- */ |
681 | /* The code below deals with error recovery */ | |
682 | ||
/** Return negative value if a permanent error, else return
 * a number of milliseconds to wait until the PCI slot is
 * ready to be used.
 */
static int
eeh_slot_availability(struct pci_dn *pdn)
{
	int rets[3];
	int rc = read_slot_reset_state(pdn, rets);

	if (rc)
		return rc;			/* firmware call failed */
	if (rets[1] == 0)
		return -1;			/* EEH is not supported */
	if (rets[0] == 0)
		return 0;			/* Oll Korrect */
	if (rets[0] == 5)
		/* rets[2]==0 means permanently unavailable,
		 * otherwise it is the number of millisecs to wait. */
		return (rets[2] == 0) ? -1 : rets[2];

	return -1;
}
705 | ||
/** rtas_pci_slot_reset raises/lowers the pci #RST line
 * state: 1/0 to raise/lower the #RST
 *
 * Clear the EEH-frozen condition on a slot.  This routine
 * asserts the PCI #RST line if the 'state' argument is '1',
 * and drops the #RST line if 'state is '0'.  This routine is
 * safe to call in an interrupt context.
 *
 * When the #RST line is dropped, the ISOLATED mark is cleared
 * on all siblings of this device node (the whole slot).
 */

static void
rtas_pci_slot_reset(struct pci_dn *pdn, int state)
{
	int rc;

	BUG_ON (pdn==NULL);

	if (!pdn->phb) {
		printk (KERN_WARNING "EEH: in slot reset, device node %s has no phb\n",
		        pdn->node->full_name);
		return;
	}

	/* ibm,set-slot-reset: 4 inputs, 1 output. */
	rc = rtas_call(ibm_set_slot_reset,4,1, NULL,
	               pdn->eeh_config_addr,
	               BUID_HI(pdn->phb->buid),
	               BUID_LO(pdn->phb->buid),
	               state);
	if (rc) {
		printk (KERN_WARNING "EEH: Unable to reset the failed slot, (%d) #RST=%d dn=%s\n",
		        rc, state, pdn->node->full_name);
		return;
	}

	/* Reset de-asserted: the slot is coming back, un-isolate it. */
	if (state == 0)
		eeh_clear_slot (pdn->node->parent->child);
}
743 | ||
/** rtas_set_slot_reset -- assert the pci #RST line for 1/4 second
 * pdn -- pci device node of the slot to be reset.
 *
 * Performs a full reset cycle: assert #RST, hold, release, wait for
 * the bus to settle, then poll firmware until the slot reports
 * ready (or gives up after 10 attempts).
 */

void
rtas_set_slot_reset(struct pci_dn *pdn)
{
	int i, rc;

	rtas_pci_slot_reset (pdn, 1);

	/* The PCI bus requires that the reset be held high for at least
	 * a 100 milliseconds. We wait a bit longer 'just in case'.  */

#define PCI_BUS_RST_HOLD_TIME_MSEC 250
	msleep (PCI_BUS_RST_HOLD_TIME_MSEC);
	rtas_pci_slot_reset (pdn, 0);

	/* After a PCI slot has been reset, the PCI Express spec requires
	 * a 1.5 second idle time for the bus to stabilize, before starting
	 * up traffic. */
#define PCI_BUS_SETTLE_TIME_MSEC 1800
	msleep (PCI_BUS_SETTLE_TIME_MSEC);

	/* Now double check with the firmware to make sure the device is
	 * ready to be used; if not, wait for recovery.
	 * eeh_slot_availability() returns <=0 when done (ok or permanent
	 * failure), or a millisecond wait hint. */
	for (i=0; i<10; i++) {
		rc = eeh_slot_availability (pdn);
		if (rc <= 0) break;

		msleep (rc+100);
	}
}
777 | ||
172ca926 LV |
778 | /* ------------------------------------------------------------- */ |
779 | /* The code below deals with enabling EEH for devices during the | |
780 | * early boot sequence. EEH must be enabled before any PCI probing | |
781 | * can be done. | |
782 | */ | |
783 | ||
784 | #define EEH_ENABLE 1 | |
785 | ||
1da177e4 LT |
786 | struct eeh_early_enable_info { |
787 | unsigned int buid_hi; | |
788 | unsigned int buid_lo; | |
789 | }; | |
790 | ||
/**
 * early_enable_eeh - enable EEH for the given device node
 * @dn: OF device node to enable EEH checking for
 * @data: pointer to a struct eeh_early_enable_info with the PHB's BUID
 *
 * Called once per device node during the boot-time PCI tree walk
 * (see eeh_init()).  Asks firmware, via the ibm,set-eeh-option RTAS
 * call, to enable EEH for the slot; records the result in the node's
 * pci_dn eeh_mode flags.  Always returns NULL so the tree traversal
 * continues over every node.
 */
static void *early_enable_eeh(struct device_node *dn, void *data)
{
	struct eeh_early_enable_info *info = data;
	int ret;
	char *status = get_property(dn, "status", NULL);
	u32 *class_code = (u32 *)get_property(dn, "class-code", NULL);
	u32 *vendor_id = (u32 *)get_property(dn, "vendor-id", NULL);
	u32 *device_id = (u32 *)get_property(dn, "device-id", NULL);
	u32 *regs;
	int enable;
	struct pci_dn *pdn = PCI_DN(dn);

	/* Start from a clean slate: no mode flags, zeroed statistics. */
	pdn->eeh_mode = 0;
	pdn->eeh_check_count = 0;
	pdn->eeh_freeze_count = 0;

	if (status && strcmp(status, "ok") != 0)
		return NULL;	/* ignore devices with bad status */

	/* Ignore bad nodes: a real PCI device node always carries
	 * class-code, vendor-id and device-id properties. */
	if (!class_code || !vendor_id || !device_id)
		return NULL;

	/* There is nothing to check on PCI to ISA bridges */
	if (dn->type && !strcmp(dn->type, "isa")) {
		pdn->eeh_mode |= EEH_MODE_NOCHECK;
		return NULL;
	}

	/*
	 * Now decide if we are going to "Disable" EEH checking
	 * for this device.  We still run with the EEH hardware active,
	 * but we won't be checking for ff's.  This means a driver
	 * could return bad data (very bad!), an interrupt handler could
	 * hang waiting on status bits that won't change, etc.
	 * But there are a few cases like display devices that make sense.
	 */
	enable = 1;	/* i.e. we will do checking */
	if ((*class_code >> 16) == PCI_BASE_CLASS_DISPLAY)
		enable = 0;

	if (!enable)
		pdn->eeh_mode |= EEH_MODE_NOCHECK;

	/* Ok... see if this device supports EEH.  Some do, some don't,
	 * and the only way to find out is to check each and every one. */
	regs = (u32 *)get_property(dn, "reg", NULL);
	if (regs) {
		/* First register entry is addr (00BBSS00) */
		/* Try to enable eeh */
		ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL,
				regs[0], info->buid_hi, info->buid_lo,
				EEH_ENABLE);

		if (ret == 0) {
			/* Firmware accepted: remember the config address so
			 * later slot-reset/status RTAS calls can find it. */
			eeh_subsystem_enabled = 1;
			pdn->eeh_mode |= EEH_MODE_SUPPORTED;
			pdn->eeh_config_addr = regs[0];
#ifdef DEBUG
			printk(KERN_DEBUG "EEH: %s: eeh enabled\n", dn->full_name);
#endif
		} else {

			/* This device doesn't support EEH, but it may have an
			 * EEH parent, in which case we mark it as supported. */
			if (dn->parent && PCI_DN(dn->parent)
			    && (PCI_DN(dn->parent)->eeh_mode & EEH_MODE_SUPPORTED)) {
				/* Parent supports EEH. */
				pdn->eeh_mode |= EEH_MODE_SUPPORTED;
				pdn->eeh_config_addr = PCI_DN(dn->parent)->eeh_config_addr;
				return NULL;
			}
		}
	} else {
		printk(KERN_WARNING "EEH: %s: unable to get reg property.\n",
		       dn->full_name);
	}

	return NULL;
}
872 | ||
/*
 * Initialize EEH by trying to enable it for all of the adapters in the system.
 * As a side effect we can determine here if eeh is supported at all.
 * Note that we leave EEH on so failed config cycles won't cause a machine
 * check.  If a user turns off EEH for a particular adapter they are really
 * telling Linux to ignore errors.  Some hardware (e.g. POWER5) won't
 * grant access to a slot if EEH isn't enabled, and so we always enable
 * EEH for all slots/all devices.
 *
 * The eeh-force-off option disables EEH checking globally, for all slots.
 * Even if force-off is set, the EEH hardware is still enabled, so that
 * newer systems can boot.
 */
void __init eeh_init(void)
{
	struct device_node *phb, *np;
	struct eeh_early_enable_info info;

	/* Locks must be usable before any EEH event can be reported. */
	spin_lock_init(&confirm_error_lock);
	spin_lock_init(&slot_errbuf_lock);

	/* No /rtas node means no RTAS firmware, hence no EEH support. */
	np = of_find_node_by_path("/rtas");
	if (np == NULL)
		return;

	/* Look up all the RTAS services EEH relies on; any of these may
	 * legitimately be absent on a given machine. */
	ibm_set_eeh_option = rtas_token("ibm,set-eeh-option");
	ibm_set_slot_reset = rtas_token("ibm,set-slot-reset");
	ibm_read_slot_reset_state2 = rtas_token("ibm,read-slot-reset-state2");
	ibm_read_slot_reset_state = rtas_token("ibm,read-slot-reset-state");
	ibm_slot_error_detail = rtas_token("ibm,slot-error-detail");

	/* Without ibm,set-eeh-option, EEH can never be enabled. */
	if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE)
		return;

	/* Size the error-detail buffer; fall back to 1K and clamp to the
	 * statically allocated maximum. */
	eeh_error_buf_size = rtas_token("rtas-error-log-max");
	if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) {
		eeh_error_buf_size = 1024;
	}
	if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) {
		printk(KERN_WARNING "EEH: rtas-error-log-max is bigger than allocated "
		      "buffer ! (%d vs %d)", eeh_error_buf_size, RTAS_ERROR_LOG_MAX);
		eeh_error_buf_size = RTAS_ERROR_LOG_MAX;
	}

	/* Enable EEH for all adapters.  Note that eeh requires buid's */
	for (phb = of_find_node_by_name(NULL, "pci"); phb;
	     phb = of_find_node_by_name(phb, "pci")) {
		unsigned long buid;

		buid = get_phb_buid(phb);
		/* Skip host bridges with no BUID or no pci_dn attached. */
		if (buid == 0 || PCI_DN(phb) == NULL)
			continue;

		info.buid_lo = BUID_LO(buid);
		info.buid_hi = BUID_HI(buid);
		traverse_pci_devices(phb, early_enable_eeh, &info);
	}

	if (eeh_subsystem_enabled)
		printk(KERN_INFO "EEH: PCI Enhanced I/O Error Handling Enabled\n");
	else
		printk(KERN_WARNING "EEH: No capable adapters found\n");
}
936 | ||
937 | /** | |
938 | * eeh_add_device_early - enable EEH for the indicated device_node | |
939 | * @dn: device node for which to set up EEH | |
940 | * | |
941 | * This routine must be used to perform EEH initialization for PCI | |
942 | * devices that were added after system boot (e.g. hotplug, dlpar). | |
943 | * This routine must be called before any i/o is performed to the | |
944 | * adapter (inluding any config-space i/o). | |
945 | * Whether this actually enables EEH or not for this device depends | |
946 | * on the CEC architecture, type of the device, on earlier boot | |
947 | * command-line arguments & etc. | |
948 | */ | |
949 | void eeh_add_device_early(struct device_node *dn) | |
950 | { | |
951 | struct pci_controller *phb; | |
952 | struct eeh_early_enable_info info; | |
953 | ||
69376502 | 954 | if (!dn || !PCI_DN(dn)) |
1da177e4 | 955 | return; |
1635317f | 956 | phb = PCI_DN(dn)->phb; |
1da177e4 | 957 | if (NULL == phb || 0 == phb->buid) { |
69376502 LV |
958 | printk(KERN_WARNING "EEH: Expected buid but found none for %s\n", |
959 | dn->full_name); | |
960 | dump_stack(); | |
1da177e4 LT |
961 | return; |
962 | } | |
963 | ||
964 | info.buid_hi = BUID_HI(phb->buid); | |
965 | info.buid_lo = BUID_LO(phb->buid); | |
966 | early_enable_eeh(dn, &info); | |
967 | } | |
56b0fca3 | 968 | EXPORT_SYMBOL_GPL(eeh_add_device_early); |
1da177e4 LT |
969 | |
970 | /** | |
971 | * eeh_add_device_late - perform EEH initialization for the indicated pci device | |
972 | * @dev: pci device for which to set up EEH | |
973 | * | |
974 | * This routine must be used to complete EEH initialization for PCI | |
975 | * devices that were added after system boot (e.g. hotplug, dlpar). | |
976 | */ | |
977 | void eeh_add_device_late(struct pci_dev *dev) | |
978 | { | |
56b0fca3 LV |
979 | struct device_node *dn; |
980 | ||
1da177e4 LT |
981 | if (!dev || !eeh_subsystem_enabled) |
982 | return; | |
983 | ||
984 | #ifdef DEBUG | |
982245f0 | 985 | printk(KERN_DEBUG "EEH: adding device %s\n", pci_name(dev)); |
1da177e4 LT |
986 | #endif |
987 | ||
56b0fca3 LV |
988 | pci_dev_get (dev); |
989 | dn = pci_device_to_OF_node(dev); | |
990 | PCI_DN(dn)->pcidev = dev; | |
991 | ||
1da177e4 LT |
992 | pci_addr_cache_insert_device (dev); |
993 | } | |
56b0fca3 | 994 | EXPORT_SYMBOL_GPL(eeh_add_device_late); |
1da177e4 LT |
995 | |
996 | /** | |
997 | * eeh_remove_device - undo EEH setup for the indicated pci device | |
998 | * @dev: pci device to be removed | |
999 | * | |
1000 | * This routine should be when a device is removed from a running | |
1001 | * system (e.g. by hotplug or dlpar). | |
1002 | */ | |
1003 | void eeh_remove_device(struct pci_dev *dev) | |
1004 | { | |
56b0fca3 | 1005 | struct device_node *dn; |
1da177e4 LT |
1006 | if (!dev || !eeh_subsystem_enabled) |
1007 | return; | |
1008 | ||
1009 | /* Unregister the device with the EEH/PCI address search system */ | |
1010 | #ifdef DEBUG | |
982245f0 | 1011 | printk(KERN_DEBUG "EEH: remove device %s\n", pci_name(dev)); |
1da177e4 LT |
1012 | #endif |
1013 | pci_addr_cache_remove_device(dev); | |
56b0fca3 LV |
1014 | |
1015 | dn = pci_device_to_OF_node(dev); | |
1016 | PCI_DN(dn)->pcidev = NULL; | |
1017 | pci_dev_put (dev); | |
1da177e4 | 1018 | } |
56b0fca3 | 1019 | EXPORT_SYMBOL_GPL(eeh_remove_device); |
1da177e4 LT |
1020 | |
1021 | static int proc_eeh_show(struct seq_file *m, void *v) | |
1022 | { | |
1023 | unsigned int cpu; | |
1024 | unsigned long ffs = 0, positives = 0, failures = 0; | |
1025 | unsigned long resets = 0; | |
177bc936 | 1026 | unsigned long no_dev = 0, no_dn = 0, no_cfg = 0, no_check = 0; |
1da177e4 LT |
1027 | |
1028 | for_each_cpu(cpu) { | |
1029 | ffs += per_cpu(total_mmio_ffs, cpu); | |
1030 | positives += per_cpu(false_positives, cpu); | |
1031 | failures += per_cpu(ignored_failures, cpu); | |
1032 | resets += per_cpu(slot_resets, cpu); | |
177bc936 LV |
1033 | no_dev += per_cpu(no_device, cpu); |
1034 | no_dn += per_cpu(no_dn, cpu); | |
1035 | no_cfg += per_cpu(no_cfg_addr, cpu); | |
1036 | no_check += per_cpu(ignored_check, cpu); | |
1da177e4 LT |
1037 | } |
1038 | ||
1039 | if (0 == eeh_subsystem_enabled) { | |
1040 | seq_printf(m, "EEH Subsystem is globally disabled\n"); | |
1041 | seq_printf(m, "eeh_total_mmio_ffs=%ld\n", ffs); | |
1042 | } else { | |
1043 | seq_printf(m, "EEH Subsystem is enabled\n"); | |
177bc936 LV |
1044 | seq_printf(m, |
1045 | "no device=%ld\n" | |
1046 | "no device node=%ld\n" | |
1047 | "no config address=%ld\n" | |
1048 | "check not wanted=%ld\n" | |
1049 | "eeh_total_mmio_ffs=%ld\n" | |
1050 | "eeh_false_positives=%ld\n" | |
1051 | "eeh_ignored_failures=%ld\n" | |
1052 | "eeh_slot_resets=%ld\n", | |
1053 | no_dev, no_dn, no_cfg, no_check, | |
1054 | ffs, positives, failures, resets); | |
1da177e4 LT |
1055 | } |
1056 | ||
1057 | return 0; | |
1058 | } | |
1059 | ||
/* open() hook for /proc/ppc64/eeh: bind the seq_file single-shot
 * show routine; no private data is needed. */
static int proc_eeh_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_eeh_show, NULL);
}
1064 | ||
/* File operations for /proc/ppc64/eeh; read/llseek/release are the
 * stock seq_file single_* helpers paired with proc_eeh_open above. */
static struct file_operations proc_eeh_operations = {
	.open      = proc_eeh_open,
	.read      = seq_read,
	.llseek    = seq_lseek,
	.release   = single_release,
};
1071 | ||
1072 | static int __init eeh_init_proc(void) | |
1073 | { | |
1074 | struct proc_dir_entry *e; | |
1075 | ||
1076 | if (systemcfg->platform & PLATFORM_PSERIES) { | |
1077 | e = create_proc_entry("ppc64/eeh", 0, NULL); | |
1078 | if (e) | |
1079 | e->proc_fops = &proc_eeh_operations; | |
1080 | } | |
1081 | ||
1082 | return 0; | |
1083 | } | |
1084 | __initcall(eeh_init_proc); |