arch/powerpc/platforms/powernv/eeh-ioda.c
/*
 * This file implements the EEH functions needed on IODA-compliant
 * chips. Most of them are built on top of the OPAL APIs.
 *
 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2013.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/string.h>

#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/msi_bitmap.h>
#include <asm/opal.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
#include <asm/tce.h>

#include "powernv.h"
#include "pci.h"

static int ioda_eeh_nb_init = 0;

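/*
 * OPAL event notifier callback. On a PCI error event we simply queue a
 * "special" EEH event (one without a bound PE); the actual error is
 * identified later through ioda_eeh_next_error().
 */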
static int ioda_eeh_event(struct notifier_block *nb,
                          unsigned long events, void *change)
{
        uint64_t changed_evts = (uint64_t)change;

        /* We simply send a special EEH event */
        if ((changed_evts & OPAL_EVENT_PCI_ERROR) &&
            (events & OPAL_EVENT_PCI_ERROR) &&
            eeh_enabled())
                eeh_send_failure_event(NULL);

        return 0;
}

static struct notifier_block ioda_eeh_nb = {
        .notifier_call = ioda_eeh_event,
        .next          = NULL,
        .priority      = 0
};

#ifdef CONFIG_DEBUG_FS
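/*
 * Raw register accessors backing the err_injct_* debugfs files created
 * in ioda_eeh_post_init(). The offsets used below (0xD10, 0xD90 and
 * 0xE10) are assumed to be the PHB's outbound, inbound-A and inbound-B
 * error injection registers respectively, as the file names suggest.
 */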
static int ioda_eeh_dbgfs_set(void *data, int offset, u64 val)
{
        struct pci_controller *hose = data;
        struct pnv_phb *phb = hose->private_data;

        out_be64(phb->regs + offset, val);
        return 0;
}

static int ioda_eeh_dbgfs_get(void *data, int offset, u64 *val)
{
        struct pci_controller *hose = data;
        struct pnv_phb *phb = hose->private_data;

        *val = in_be64(phb->regs + offset);
        return 0;
}

static int ioda_eeh_outb_dbgfs_set(void *data, u64 val)
{
        return ioda_eeh_dbgfs_set(data, 0xD10, val);
}

static int ioda_eeh_outb_dbgfs_get(void *data, u64 *val)
{
        return ioda_eeh_dbgfs_get(data, 0xD10, val);
}

static int ioda_eeh_inbA_dbgfs_set(void *data, u64 val)
{
        return ioda_eeh_dbgfs_set(data, 0xD90, val);
}

static int ioda_eeh_inbA_dbgfs_get(void *data, u64 *val)
{
        return ioda_eeh_dbgfs_get(data, 0xD90, val);
}

static int ioda_eeh_inbB_dbgfs_set(void *data, u64 val)
{
        return ioda_eeh_dbgfs_set(data, 0xE10, val);
}

static int ioda_eeh_inbB_dbgfs_get(void *data, u64 *val)
{
        return ioda_eeh_dbgfs_get(data, 0xE10, val);
}

DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_outb_dbgfs_ops, ioda_eeh_outb_dbgfs_get,
                        ioda_eeh_outb_dbgfs_set, "0x%llx\n");
DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbA_dbgfs_ops, ioda_eeh_inbA_dbgfs_get,
                        ioda_eeh_inbA_dbgfs_set, "0x%llx\n");
DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbB_dbgfs_ops, ioda_eeh_inbB_dbgfs_get,
                        ioda_eeh_inbB_dbgfs_set, "0x%llx\n");
#endif /* CONFIG_DEBUG_FS */

/**
 * ioda_eeh_post_init - Chip dependent post initialization
 * @hose: PCI controller
 *
 * The function is called after the EEH PEs and devices have been
 * built, which means EEH is ready to provide service with its I/O
 * address cache in place.
 */
static int ioda_eeh_post_init(struct pci_controller *hose)
{
        struct pnv_phb *phb = hose->private_data;
        int ret;

        /* Register OPAL event notifier */
        if (!ioda_eeh_nb_init) {
                ret = opal_notifier_register(&ioda_eeh_nb);
                if (ret) {
                        pr_err("%s: Can't register OPAL event notifier (%d)\n",
                               __func__, ret);
                        return ret;
                }

                ioda_eeh_nb_init = 1;
        }

#ifdef CONFIG_DEBUG_FS
        if (phb->dbgfs) {
                debugfs_create_file("err_injct_outbound", 0600,
                                    phb->dbgfs, hose,
                                    &ioda_eeh_outb_dbgfs_ops);
                debugfs_create_file("err_injct_inboundA", 0600,
                                    phb->dbgfs, hose,
                                    &ioda_eeh_inbA_dbgfs_ops);
                debugfs_create_file("err_injct_inboundB", 0600,
                                    phb->dbgfs, hose,
                                    &ioda_eeh_inbB_dbgfs_ops);
        }
#endif

        phb->eeh_state |= PNV_EEH_STATE_ENABLED;

        return 0;
}

/**
 * ioda_eeh_set_option - Set EEH operation or I/O setting
 * @pe: EEH PE
 * @option: options
 *
 * Enable or disable the EEH option for the indicated PE. The function
 * can also be used to re-enable MMIO or DMA for the PE.
 */
static int ioda_eeh_set_option(struct eeh_pe *pe, int option)
{
        s64 ret;
        u32 pe_no;
        struct pci_controller *hose = pe->phb;
        struct pnv_phb *phb = hose->private_data;

        /* Check on PE number */
        if (pe->addr < 0 || pe->addr >= phb->ioda.total_pe) {
                pr_err("%s: PE address %x out of range [0, %x] "
                       "on PHB#%x\n",
                       __func__, pe->addr, phb->ioda.total_pe,
                       hose->global_number);
                return -EINVAL;
        }

        pe_no = pe->addr;
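        /*
         * EEH is presumed to be always enabled on PowerNV: there is
         * nothing to do for EEH_OPT_ENABLE, and a request to disable
         * it is refused with -EEXIST. The thaw options clear the MMIO
         * or DMA freeze through OPAL.
         */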
        switch (option) {
        case EEH_OPT_DISABLE:
                ret = -EEXIST;
                break;
        case EEH_OPT_ENABLE:
                ret = 0;
                break;
        case EEH_OPT_THAW_MMIO:
                ret = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
                                OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO);
                if (ret) {
                        pr_warning("%s: Failed to enable MMIO for "
                                   "PHB#%x-PE#%x, err=%lld\n",
                                   __func__, hose->global_number, pe_no, ret);
                        return -EIO;
                }

                break;
        case EEH_OPT_THAW_DMA:
                ret = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
                                OPAL_EEH_ACTION_CLEAR_FREEZE_DMA);
                if (ret) {
                        pr_warning("%s: Failed to enable DMA for "
                                   "PHB#%x-PE#%x, err=%lld\n",
                                   __func__, hose->global_number, pe_no, ret);
                        return -EIO;
                }

                break;
        default:
                pr_warning("%s: Invalid option %d\n", __func__, option);
                return -EINVAL;
        }

        return ret;
}

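/*
 * Fetch the PHB diag-data blob from OPAL and dump it to the kernel
 * log. This is done as soon as a PE or PHB is marked isolated, so the
 * error registers are captured before recovery disturbs them.
 */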
static void ioda_eeh_phb_diag(struct pci_controller *hose)
{
        struct pnv_phb *phb = hose->private_data;
        long rc;

        rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
                                         PNV_PCI_DIAG_BUF_SIZE);
        if (rc != OPAL_SUCCESS) {
                pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n",
                           __func__, hose->global_number, rc);
                return;
        }

        pnv_pci_dump_phb_diag_data(hose, phb->diag.blob);
}

/**
 * ioda_eeh_get_state - Retrieve the state of PE
 * @pe: EEH PE
 *
 * The PE's state is retrieved from the PEEV and PEST IODA tables.
 * Since OPAL exports a function to do exactly that, we simply use it.
 */
static int ioda_eeh_get_state(struct eeh_pe *pe)
{
        s64 ret = 0;
        u8 fstate;
        u16 pcierr;
        u32 pe_no;
        int result;
        struct pci_controller *hose = pe->phb;
        struct pnv_phb *phb = hose->private_data;

        /*
         * Sanity check on PE address. The PHB PE address should
         * be zero.
         */
        if (pe->addr < 0 || pe->addr >= phb->ioda.total_pe) {
                pr_err("%s: PE address %x out of range [0, %x] "
                       "on PHB#%x\n",
                       __func__, pe->addr, phb->ioda.total_pe,
                       hose->global_number);
                return EEH_STATE_NOT_SUPPORT;
        }

        /* Retrieve PE status through OPAL */
        pe_no = pe->addr;
        ret = opal_pci_eeh_freeze_status(phb->opal_id, pe_no,
                                         &fstate, &pcierr, NULL);
        if (ret) {
                pr_err("%s: Failed to get EEH status on "
                       "PHB#%x-PE#%x, err=%lld\n",
                       __func__, hose->global_number, pe_no, ret);
                return EEH_STATE_NOT_SUPPORT;
        }

        /* Check PHB status */
        if (pe->type & EEH_PE_PHB) {
                result = 0;
                result &= ~EEH_STATE_RESET_ACTIVE;

                if (pcierr != OPAL_EEH_PHB_ERROR) {
                        result |= EEH_STATE_MMIO_ACTIVE;
                        result |= EEH_STATE_DMA_ACTIVE;
                        result |= EEH_STATE_MMIO_ENABLED;
                        result |= EEH_STATE_DMA_ENABLED;
                } else if (!(pe->state & EEH_PE_ISOLATED)) {
                        eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
                        ioda_eeh_phb_diag(hose);
                }

                return result;
        }

        /*
         * Parse the result out: map the OPAL freeze state of the PE
         * to the EEH_STATE_* flags expected by the EEH core.
         */
        result = 0;
        switch (fstate) {
        case OPAL_EEH_STOPPED_NOT_FROZEN:
                result &= ~EEH_STATE_RESET_ACTIVE;
                result |= EEH_STATE_MMIO_ACTIVE;
                result |= EEH_STATE_DMA_ACTIVE;
                result |= EEH_STATE_MMIO_ENABLED;
                result |= EEH_STATE_DMA_ENABLED;
                break;
        case OPAL_EEH_STOPPED_MMIO_FREEZE:
                result &= ~EEH_STATE_RESET_ACTIVE;
                result |= EEH_STATE_DMA_ACTIVE;
                result |= EEH_STATE_DMA_ENABLED;
                break;
        case OPAL_EEH_STOPPED_DMA_FREEZE:
                result &= ~EEH_STATE_RESET_ACTIVE;
                result |= EEH_STATE_MMIO_ACTIVE;
                result |= EEH_STATE_MMIO_ENABLED;
                break;
        case OPAL_EEH_STOPPED_MMIO_DMA_FREEZE:
                result &= ~EEH_STATE_RESET_ACTIVE;
                break;
        case OPAL_EEH_STOPPED_RESET:
                result |= EEH_STATE_RESET_ACTIVE;
                break;
        case OPAL_EEH_STOPPED_TEMP_UNAVAIL:
                result |= EEH_STATE_UNAVAILABLE;
                break;
        case OPAL_EEH_STOPPED_PERM_UNAVAIL:
                result |= EEH_STATE_NOT_SUPPORT;
                break;
        default:
                pr_warning("%s: Unexpected EEH status 0x%x "
                           "on PHB#%x-PE#%x\n",
                           __func__, fstate, hose->global_number, pe_no);
        }

        /* Dump PHB diag-data for frozen PE */
        if (result != EEH_STATE_NOT_SUPPORT &&
            (result & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) !=
            (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE) &&
            !(pe->state & EEH_PE_ISOLATED)) {
                eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
                ioda_eeh_phb_diag(hose);
        }

        return result;
}

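/*
 * Clear the frozen state of the indicated PE through OPAL and then
 * read the freeze status back to verify that the PE really is thawed.
 */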
static int ioda_eeh_pe_clear(struct eeh_pe *pe)
{
        struct pci_controller *hose;
        struct pnv_phb *phb;
        u32 pe_no;
        u8 fstate;
        u16 pcierr;
        s64 ret;

        pe_no = pe->addr;
        hose = pe->phb;
        phb = pe->phb->private_data;

        /* Clear the EEH error on the PE */
        ret = opal_pci_eeh_freeze_clear(phb->opal_id,
                        pe_no, OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
        if (ret) {
                pr_err("%s: Failed to clear EEH error for "
                       "PHB#%x-PE#%x, err=%lld\n",
                       __func__, hose->global_number, pe_no, ret);
                return -EIO;
        }

        /*
         * Read the PE state back and verify that the frozen
         * state has been removed.
         */
        ret = opal_pci_eeh_freeze_status(phb->opal_id, pe_no,
                                         &fstate, &pcierr, NULL);
        if (ret) {
                pr_err("%s: Failed to get EEH status on "
                       "PHB#%x-PE#%x, err=%lld\n",
                       __func__, hose->global_number, pe_no, ret);
                return -EIO;
        }

        if (fstate != OPAL_EEH_STOPPED_NOT_FROZEN) {
                pr_err("%s: Frozen state not cleared on "
                       "PHB#%x-PE#%x, sts=%x\n",
                       __func__, hose->global_number, pe_no, fstate);
                return -EIO;
        }

        return 0;
}

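/*
 * Poll the PHB until the pending reset request completes. A positive
 * return value from opal_pci_poll() is treated as the number of
 * milliseconds to sleep before polling again; zero or a negative
 * value ends the loop (completion or error).
 */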
static s64 ioda_eeh_phb_poll(struct pnv_phb *phb)
{
        s64 rc = OPAL_HARDWARE;

        while (1) {
                rc = opal_pci_poll(phb->opal_id);
                if (rc <= 0)
                        break;

                msleep(rc);
        }

        return rc;
}

static int ioda_eeh_phb_reset(struct pci_controller *hose, int option)
{
        struct pnv_phb *phb = hose->private_data;
        s64 rc = OPAL_HARDWARE;

        pr_debug("%s: Reset PHB#%x, option=%d\n",
                 __func__, hose->global_number, option);

        /* Issue PHB complete reset request */
        if (option == EEH_RESET_FUNDAMENTAL ||
            option == EEH_RESET_HOT)
                rc = opal_pci_reset(phb->opal_id,
                                    OPAL_PHB_COMPLETE,
                                    OPAL_ASSERT_RESET);
        else if (option == EEH_RESET_DEACTIVATE)
                rc = opal_pci_reset(phb->opal_id,
                                    OPAL_PHB_COMPLETE,
                                    OPAL_DEASSERT_RESET);
        if (rc < 0)
                goto out;

        /*
         * Poll state of the PHB until the request is done
         * successfully.
         */
        rc = ioda_eeh_phb_poll(phb);
out:
        if (rc != OPAL_SUCCESS)
                return -EIO;

        return 0;
}

static int ioda_eeh_root_reset(struct pci_controller *hose, int option)
{
        struct pnv_phb *phb = hose->private_data;
        s64 rc = OPAL_SUCCESS;

        pr_debug("%s: Reset PHB#%x, option=%d\n",
                 __func__, hose->global_number, option);

        /*
         * When deasserting the reset, the reset scope doesn't matter:
         * the firmware does nothing differently for a fundamental or
         * hot reset during the deassert phase.
         */
        if (option == EEH_RESET_FUNDAMENTAL)
                rc = opal_pci_reset(phb->opal_id,
                                    OPAL_PCI_FUNDAMENTAL_RESET,
                                    OPAL_ASSERT_RESET);
        else if (option == EEH_RESET_HOT)
                rc = opal_pci_reset(phb->opal_id,
                                    OPAL_PCI_HOT_RESET,
                                    OPAL_ASSERT_RESET);
        else if (option == EEH_RESET_DEACTIVATE)
                rc = opal_pci_reset(phb->opal_id,
                                    OPAL_PCI_HOT_RESET,
                                    OPAL_DEASSERT_RESET);
        if (rc < 0)
                goto out;

        /* Poll state of the PHB until the request is done */
        rc = ioda_eeh_phb_poll(phb);
out:
        if (rc != OPAL_SUCCESS)
                return -EIO;

        return 0;
}

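/*
 * Reset the PCI bus below a P2P bridge by toggling the secondary bus
 * reset bit in the bridge control register: the bit is set for a hot
 * or fundamental reset and cleared again on deactivate.
 */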
static int ioda_eeh_bridge_reset(struct pci_controller *hose,
                                 struct pci_dev *dev, int option)
{
        u16 ctrl;

        pr_debug("%s: Reset device %04x:%02x:%02x.%01x with option %d\n",
                 __func__, hose->global_number, dev->bus->number,
                 PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn), option);

        switch (option) {
        case EEH_RESET_FUNDAMENTAL:
        case EEH_RESET_HOT:
                pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
                ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
                pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
                break;
        case EEH_RESET_DEACTIVATE:
                pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
                ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
                pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
                break;
        }

        return 0;
}

/**
 * ioda_eeh_reset - Reset the indicated PE
 * @pe: EEH PE
 * @option: reset option
 *
 * Reset the indicated PE. For a PCI-bus-sensitive PE, we reset the
 * parent P2P bridge; if that bridge is the root bridge, the PHB has
 * to be reinitialized. For a PCI-device-sensitive PE, we would try to
 * reset the device through FLR. For now there are no OPAL APIs for a
 * HARD reset, so all resets are SOFT (HOT) resets.
 */
static int ioda_eeh_reset(struct eeh_pe *pe, int option)
{
        struct pci_controller *hose = pe->phb;
        struct pci_bus *bus;
        int ret;

        /*
         * We first have to clear the frozen state of the affected PE.
         * That step is skipped for a PHB-associated PE: in that case
         * the PHB itself has hit a fatal error and needs a reset, and
         * the AIB interface isn't reliable any more anyway.
         */
        if (!(pe->type & EEH_PE_PHB) &&
            (option == EEH_RESET_HOT ||
             option == EEH_RESET_FUNDAMENTAL)) {
                ret = ioda_eeh_pe_clear(pe);
                if (ret)
                        return -EIO;
        }

        /*
         * The rules for a reset, whether fundamental or hot:
         *
         * We always reset the direct upstream bridge of the PE. If
         * that bridge isn't the root bridge, a hot reset is taken no
         * matter which option (fundamental or hot) was requested.
         * Otherwise, the reset follows the requested option.
         */
        if (pe->type & EEH_PE_PHB) {
                ret = ioda_eeh_phb_reset(hose, option);
        } else {
                bus = eeh_pe_bus_get(pe);
                if (pci_is_root_bus(bus))
                        ret = ioda_eeh_root_reset(hose, option);
                else
                        ret = ioda_eeh_bridge_reset(hose, bus->self, option);
        }

        return ret;
}

/**
 * ioda_eeh_configure_bridge - Configure the PCI bridges for the indicated PE
 * @pe: EEH PE
 *
 * A PE may include PCI bridges, which must be configured correctly
 * for the PE to work properly. On P7IOC there is nothing to do here,
 * since the reset function already covers everything this function
 * would need to do.
 */
static int ioda_eeh_configure_bridge(struct eeh_pe *pe)
{
        return 0;
}

static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data)
{
        /* GEM */
        pr_info(" GEM XFIR:        %016llx\n", data->gemXfir);
        pr_info(" GEM RFIR:        %016llx\n", data->gemRfir);
        pr_info(" GEM RIRQFIR:     %016llx\n", data->gemRirqfir);
        pr_info(" GEM Mask:        %016llx\n", data->gemMask);
        pr_info(" GEM RWOF:        %016llx\n", data->gemRwof);

        /* LEM */
        pr_info(" LEM FIR:         %016llx\n", data->lemFir);
        pr_info(" LEM Error Mask:  %016llx\n", data->lemErrMask);
        pr_info(" LEM Action 0:    %016llx\n", data->lemAction0);
        pr_info(" LEM Action 1:    %016llx\n", data->lemAction1);
        pr_info(" LEM WOF:         %016llx\n", data->lemWof);
}

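/*
 * Fetch the P7IOC hub diag-data from OPAL and decode it according to
 * its type (RGC, BI, CI, MISC or I2C), dumping the interesting
 * registers to the kernel log.
 */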
static void ioda_eeh_hub_diag(struct pci_controller *hose)
{
        struct pnv_phb *phb = hose->private_data;
        struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag;
        long rc;

        rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
        if (rc != OPAL_SUCCESS) {
                pr_warning("%s: Failed to get HUB#%llx diag-data (%ld)\n",
                           __func__, phb->hub_id, rc);
                return;
        }

        switch (data->type) {
        case OPAL_P7IOC_DIAG_TYPE_RGC:
                pr_info("P7IOC diag-data for RGC\n\n");
                ioda_eeh_hub_diag_common(data);
                pr_info(" RGC Status:      %016llx\n", data->rgc.rgcStatus);
                pr_info(" RGC LDCP:        %016llx\n", data->rgc.rgcLdcp);
                break;
        case OPAL_P7IOC_DIAG_TYPE_BI:
                pr_info("P7IOC diag-data for BI %s\n\n",
                        data->bi.biDownbound ? "Downbound" : "Upbound");
                ioda_eeh_hub_diag_common(data);
                pr_info(" BI LDCP 0:       %016llx\n", data->bi.biLdcp0);
                pr_info(" BI LDCP 1:       %016llx\n", data->bi.biLdcp1);
                pr_info(" BI LDCP 2:       %016llx\n", data->bi.biLdcp2);
                pr_info(" BI Fence Status: %016llx\n", data->bi.biFenceStatus);
                break;
        case OPAL_P7IOC_DIAG_TYPE_CI:
                pr_info("P7IOC diag-data for CI Port %d\n\n",
                        data->ci.ciPort);
                ioda_eeh_hub_diag_common(data);
                pr_info(" CI Port Status:  %016llx\n", data->ci.ciPortStatus);
                pr_info(" CI Port LDCP:    %016llx\n", data->ci.ciPortLdcp);
                break;
        case OPAL_P7IOC_DIAG_TYPE_MISC:
                pr_info("P7IOC diag-data for MISC\n\n");
                ioda_eeh_hub_diag_common(data);
                break;
        case OPAL_P7IOC_DIAG_TYPE_I2C:
                pr_info("P7IOC diag-data for I2C\n\n");
                ioda_eeh_hub_diag_common(data);
                break;
        default:
                pr_warning("%s: Invalid type of HUB#%llx diag-data (%d)\n",
                           __func__, phb->hub_id, data->type);
        }
}

static int ioda_eeh_get_phb_pe(struct pci_controller *hose,
                               struct eeh_pe **pe)
{
        struct eeh_pe *phb_pe;

        phb_pe = eeh_phb_pe_get(hose);
        if (!phb_pe) {
                pr_warning("%s: Can't find PE for PHB#%d\n",
                           __func__, hose->global_number);
                return -EEXIST;
        }

        *pe = phb_pe;
        return 0;
}

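/*
 * Look up the EEH PE for the given PE number by faking up a temporary
 * eeh_dev carrying the PE number as its config address and searching
 * the PE hierarchy of the PHB with it.
 */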
static int ioda_eeh_get_pe(struct pci_controller *hose,
                           u16 pe_no, struct eeh_pe **pe)
{
        struct eeh_pe *phb_pe, *dev_pe;
        struct eeh_dev dev;

        /* Find the PHB PE */
        if (ioda_eeh_get_phb_pe(hose, &phb_pe))
                return -EEXIST;

        /* Find the PE according to PE# */
        memset(&dev, 0, sizeof(struct eeh_dev));
        dev.phb = hose;
        dev.pe_config_addr = pe_no;
        dev_pe = eeh_pe_get(&dev);
        if (!dev_pe)
                return -EEXIST;

        *pe = dev_pe;
        return 0;
}

/**
 * ioda_eeh_next_error - Retrieve next error for EEH core to handle
 * @pe: The affected PE
 *
 * The function is expected to be called by the EEH core when it
 * receives a special EEH event (one without a bound PE). It asks the
 * OPAL APIs for the next error to handle. Informational errors are
 * handled internally by the platform, while dead IOC, dead PHB,
 * fenced PHB and frozen PE errors are passed to the EEH core for
 * recovery.
 */
static int ioda_eeh_next_error(struct eeh_pe **pe)
{
        struct pci_controller *hose;
        struct pnv_phb *phb;
        u64 frozen_pe_no;
        u16 err_type, severity;
        long rc;
        int ret = EEH_NEXT_ERR_NONE;

        /*
         * While running here, it's safe to purge the event queue.
         * We should also keep the cached OPAL notifier event
         * synchronized between the kernel and the firmware.
         */
        eeh_remove_event(NULL);
        opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);

        list_for_each_entry(hose, &hose_list, list_node) {
                /*
                 * If the subordinate PCI buses of the PHB have been
                 * removed, we don't need to take care of it any more.
                 */
                phb = hose->private_data;
                if (phb->eeh_state & PNV_EEH_STATE_REMOVED)
                        continue;

                rc = opal_pci_next_error(phb->opal_id,
                                &frozen_pe_no, &err_type, &severity);

                /* If the OPAL API returns an error, we needn't proceed */
                if (rc != OPAL_SUCCESS) {
                        pr_devel("%s: Invalid return value on "
                                 "PHB#%x (0x%lx) from opal_pci_next_error\n",
                                 __func__, hose->global_number, rc);
                        continue;
                }

                /* If the PHB doesn't have an error, stop processing */
                if (err_type == OPAL_EEH_NO_ERROR ||
                    severity == OPAL_EEH_SEV_NO_ERROR) {
                        pr_devel("%s: No error found on PHB#%x\n",
                                 __func__, hose->global_number);
                        continue;
                }

                /*
                 * Process the error. When multiple errors are pending
                 * on the PHB, the one with the highest priority is
                 * expected to be reported first.
                 */
                pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n",
                         __func__, err_type, severity,
                         frozen_pe_no, hose->global_number);
                switch (err_type) {
                case OPAL_EEH_IOC_ERROR:
                        if (severity == OPAL_EEH_SEV_IOC_DEAD) {
                                list_for_each_entry(hose, &hose_list,
                                                    list_node) {
                                        phb = hose->private_data;
                                        phb->eeh_state |= PNV_EEH_STATE_REMOVED;
                                }

                                pr_err("EEH: dead IOC detected\n");
                                ret = EEH_NEXT_ERR_DEAD_IOC;
                        } else if (severity == OPAL_EEH_SEV_INF) {
                                pr_info("EEH: IOC informative error "
                                        "detected\n");
                                ioda_eeh_hub_diag(hose);
                                ret = EEH_NEXT_ERR_NONE;
                        }

                        break;
                case OPAL_EEH_PHB_ERROR:
                        if (severity == OPAL_EEH_SEV_PHB_DEAD) {
                                if (ioda_eeh_get_phb_pe(hose, pe))
                                        break;

                                pr_err("EEH: dead PHB#%x detected\n",
                                       hose->global_number);
                                phb->eeh_state |= PNV_EEH_STATE_REMOVED;
                                ret = EEH_NEXT_ERR_DEAD_PHB;
                        } else if (severity == OPAL_EEH_SEV_PHB_FENCED) {
                                if (ioda_eeh_get_phb_pe(hose, pe))
                                        break;

                                pr_err("EEH: fenced PHB#%x detected\n",
                                       hose->global_number);
                                ret = EEH_NEXT_ERR_FENCED_PHB;
                        } else if (severity == OPAL_EEH_SEV_INF) {
                                pr_info("EEH: PHB#%x informative error "
                                        "detected\n",
                                        hose->global_number);
                                ioda_eeh_phb_diag(hose);
                                ret = EEH_NEXT_ERR_NONE;
                        }

                        break;
                case OPAL_EEH_PE_ERROR:
                        /*
                         * If we can't find the corresponding PE, the
                         * PEEV / PEST would be messy. So we force a
                         * fenced PHB so that it can be recovered.
                         */
                        if (ioda_eeh_get_pe(hose, frozen_pe_no, pe)) {
                                if (!ioda_eeh_get_phb_pe(hose, pe)) {
                                        pr_err("EEH: Escalated fenced PHB#%x "
                                               "detected for PE#%llx\n",
                                               hose->global_number,
                                               frozen_pe_no);
                                        ret = EEH_NEXT_ERR_FENCED_PHB;
                                } else {
                                        ret = EEH_NEXT_ERR_NONE;
                                }
                        } else {
                                pr_err("EEH: Frozen PE#%x on PHB#%x detected\n",
                                       (*pe)->addr, (*pe)->phb->global_number);
                                ret = EEH_NEXT_ERR_FROZEN_PE;
                        }

                        break;
                default:
                        pr_warn("%s: Unexpected error type %d\n",
                                __func__, err_type);
                }

                /*
                 * The EEH core will try to recover from a fenced PHB
                 * or a frozen PE. For a frozen PE, the EEH core
                 * re-enables the I/O path before collecting logs,
                 * which would ruin the error site, so we have to dump
                 * the diag-data in advance here.
                 */
                if ((ret == EEH_NEXT_ERR_FROZEN_PE ||
                     ret == EEH_NEXT_ERR_FENCED_PHB) &&
                    !((*pe)->state & EEH_PE_ISOLATED)) {
                        eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
                        ioda_eeh_phb_diag(hose);
                }

                /*
                 * If there are no errors on this PHB, or only an
                 * informative error, we keep poking the other PHBs.
                 * Otherwise, the upper layer needs to take action.
                 */
                if (ret > EEH_NEXT_ERR_INF)
                        break;
        }

        return ret;
}

struct pnv_eeh_ops ioda_eeh_ops = {
        .post_init        = ioda_eeh_post_init,
        .set_option       = ioda_eeh_set_option,
        .get_state        = ioda_eeh_get_state,
        .reset            = ioda_eeh_reset,
        .configure_bridge = ioda_eeh_configure_bridge,
        .next_error       = ioda_eeh_next_error
};