2 * This file implements the functions needed by EEH on IODA-compliant
3 * chips. Most of the EEH functionality here is built on top of
4 * the OPAL APIs.
6 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2013.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
14 #include <linux/bootmem.h>
15 #include <linux/debugfs.h>
16 #include <linux/delay.h>
18 #include <linux/irq.h>
19 #include <linux/kernel.h>
20 #include <linux/msi.h>
21 #include <linux/notifier.h>
22 #include <linux/pci.h>
23 #include <linux/string.h>
26 #include <asm/eeh_event.h>
28 #include <asm/iommu.h>
29 #include <asm/msi_bitmap.h>
31 #include <asm/pci-bridge.h>
32 #include <asm/ppc-pci.h>
38 static int ioda_eeh_nb_init = 0;
/*
 * OPAL event notifier callback: when the set of changed events and the
 * current events both include a PCI error, forward a special (PE-less)
 * failure event to the EEH core for later probing/recovery.
 * NOTE(review): this extract appears to be missing lines (braces,
 * return statement) — verify against the full source.
 */
40 static int ioda_eeh_event(struct notifier_block *nb,
41 unsigned long events, void *change)
43 	uint64_t changed_evts = (uint64_t)change;
45 /* We simply send special EEH event */
46 if ((changed_evts & OPAL_EVENT_PCI_ERROR) &&
47 (events & OPAL_EVENT_PCI_ERROR) &&
49 eeh_send_failure_event(NULL);
/* Notifier block registered with OPAL in ioda_eeh_post_init(). */
54 static struct notifier_block ioda_eeh_nb = {
55 .notifier_call = ioda_eeh_event,
60 #ifdef CONFIG_DEBUG_FS
/*
 * Debugfs backends for the error-injection files created in
 * ioda_eeh_post_init(). Each file maps to a fixed PHB register
 * offset (0xD10 outbound, 0xD90 inbound A, 0xE10 inbound B).
 */

/* Write @val to the PHB register at @offset. */
61 static int ioda_eeh_dbgfs_set(void *data, int offset, u64 val)
63 struct pci_controller *hose = data;
64 struct pnv_phb *phb = hose->private_data;
66 out_be64(phb->regs + offset, val);

/* Read the PHB register at @offset into *@val. */
70 static int ioda_eeh_dbgfs_get(void *data, int offset, u64 *val)
72 struct pci_controller *hose = data;
73 struct pnv_phb *phb = hose->private_data;
75 *val = in_be64(phb->regs + offset);

/* Outbound error-injection register (offset 0xD10). */
79 static int ioda_eeh_outb_dbgfs_set(void *data, u64 val)
81 return ioda_eeh_dbgfs_set(data, 0xD10, val);
84 static int ioda_eeh_outb_dbgfs_get(void *data, u64 *val)
86 return ioda_eeh_dbgfs_get(data, 0xD10, val);

/* Inbound A error-injection register (offset 0xD90). */
89 static int ioda_eeh_inbA_dbgfs_set(void *data, u64 val)
91 return ioda_eeh_dbgfs_set(data, 0xD90, val);
94 static int ioda_eeh_inbA_dbgfs_get(void *data, u64 *val)
96 return ioda_eeh_dbgfs_get(data, 0xD90, val);

/* Inbound B error-injection register (offset 0xE10). */
99 static int ioda_eeh_inbB_dbgfs_set(void *data, u64 val)
101 return ioda_eeh_dbgfs_set(data, 0xE10, val);
104 static int ioda_eeh_inbB_dbgfs_get(void *data, u64 *val)
106 return ioda_eeh_dbgfs_get(data, 0xE10, val);

/* Bind the get/set pairs to simple file_operations with hex output. */
109 DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_outb_dbgfs_ops, ioda_eeh_outb_dbgfs_get,
110 ioda_eeh_outb_dbgfs_set, "0x%llx\n");
111 DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbA_dbgfs_ops, ioda_eeh_inbA_dbgfs_get,
112 ioda_eeh_inbA_dbgfs_set, "0x%llx\n");
113 DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbB_dbgfs_ops, ioda_eeh_inbB_dbgfs_get,
114 ioda_eeh_inbB_dbgfs_set, "0x%llx\n");
115 #endif /* CONFIG_DEBUG_FS */
119 * ioda_eeh_post_init - Chip dependent post initialization
120 * @hose: PCI controller
122 * The function will be called after eeh PEs and devices
123 * have been built. That means the EEH is ready to supply
124 * service with I/O cache.
126 static int ioda_eeh_post_init(struct pci_controller *hose)
128 struct pnv_phb *phb = hose->private_data;
131 /* Register OPAL event notifier */
/* Only register once for all PHBs, guarded by ioda_eeh_nb_init. */
132 if (!ioda_eeh_nb_init) {
133 ret = opal_notifier_register(&ioda_eeh_nb);
135 pr_err("%s: Can't register OPAL event notifier (%d)\n",
140 ioda_eeh_nb_init = 1;
/* Expose error-injection knobs for this PHB under debugfs. */
143 #ifdef CONFIG_DEBUG_FS
145 debugfs_create_file("err_injct_outbound", 0600,
147 &ioda_eeh_outb_dbgfs_ops);
148 debugfs_create_file("err_injct_inboundA", 0600,
150 &ioda_eeh_inbA_dbgfs_ops);
151 debugfs_create_file("err_injct_inboundB", 0600,
153 &ioda_eeh_inbB_dbgfs_ops);
/* Mark the PHB as EEH-enabled so the rest of the platform knows. */
157 phb->flags |= PNV_PHB_FLAG_EEH;
163 * ioda_eeh_set_option - Set EEH operation or I/O setting
167 * Enable or disable EEH option for the indicated PE. The
168 * function also can be used to enable I/O or DMA for the
171 static int ioda_eeh_set_option(struct eeh_pe *pe, int option)
175 struct pci_controller *hose = pe->phb;
176 struct pnv_phb *phb = hose->private_data;
178 /* Check on PE number */
/* PE address must lie within [0, total_pe) for this PHB. */
179 if (pe->addr < 0 || pe->addr >= phb->ioda.total_pe) {
180 pr_err("%s: PE address %x out of range [0, %x] "
182 __func__, pe->addr, phb->ioda.total_pe,
183 hose->global_number);
189 case EEH_OPT_DISABLE:
/* Thaw MMIO: ask firmware to clear the MMIO freeze on the PE. */
195 case EEH_OPT_THAW_MMIO:
196 ret = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
197 OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO);
199 pr_warning("%s: Failed to enable MMIO for "
200 "PHB#%x-PE#%x, err=%lld\n",
201 __func__, hose->global_number, pe_no, ret);
/* Thaw DMA: ask firmware to clear the DMA freeze on the PE. */
206 case EEH_OPT_THAW_DMA:
207 ret = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
208 OPAL_EEH_ACTION_CLEAR_FREEZE_DMA);
210 pr_warning("%s: Failed to enable DMA for "
211 "PHB#%x-PE#%x, err=%lld\n",
212 __func__, hose->global_number, pe_no, ret);
218 pr_warning("%s: Invalid option %d\n", __func__, option);
/*
 * Fetch the PHB diag-data blob from firmware and dump it to the log.
 * On failure just warn — diag-data is best-effort.
 */
225 static void ioda_eeh_phb_diag(struct pci_controller *hose)
227 struct pnv_phb *phb = hose->private_data;
230 rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
231 PNV_PCI_DIAG_BUF_SIZE);
232 if (rc != OPAL_SUCCESS) {
233 pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n",
234 __func__, hose->global_number, rc);
238 pnv_pci_dump_phb_diag_data(hose, phb->diag.blob);
242 * ioda_eeh_get_state - Retrieve the state of PE
245 * The PE's state should be retrieved from the PEEV, PEST
246 * IODA tables. Since the OPAL has exported the function
247 * to do it, it'd better to use that.
249 static int ioda_eeh_get_state(struct eeh_pe *pe)
256 struct pci_controller *hose = pe->phb;
257 struct pnv_phb *phb = hose->private_data;
260 * Sanity check on PE address. The PHB PE address should
263 if (pe->addr < 0 || pe->addr >= phb->ioda.total_pe) {
264 pr_err("%s: PE address %x out of range [0, %x] "
266 __func__, pe->addr, phb->ioda.total_pe,
267 hose->global_number);
268 return EEH_STATE_NOT_SUPPORT;
271 /* Retrieve PE status through OPAL */
273 ret = opal_pci_eeh_freeze_status(phb->opal_id, pe_no,
274 &fstate, &pcierr, NULL);
276 pr_err("%s: Failed to get EEH status on "
277 "PHB#%x-PE#%x\n, err=%lld\n",
278 __func__, hose->global_number, pe_no, ret);
279 return EEH_STATE_NOT_SUPPORT;
282 /* Check PHB status */
/*
 * For a PHB PE, the result is derived from the PHB error state
 * (pcierr) rather than the per-PE freeze state; a PHB error marks
 * the PE isolated and dumps diag-data once.
 */
283 if (pe->type & EEH_PE_PHB) {
285 result &= ~EEH_STATE_RESET_ACTIVE;
287 if (pcierr != OPAL_EEH_PHB_ERROR) {
288 result |= EEH_STATE_MMIO_ACTIVE;
289 result |= EEH_STATE_DMA_ACTIVE;
290 result |= EEH_STATE_MMIO_ENABLED;
291 result |= EEH_STATE_DMA_ENABLED;
292 } else if (!(pe->state & EEH_PE_ISOLATED)) {
293 eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
294 ioda_eeh_phb_diag(hose);
300 /* Parse result out */
/* Map firmware freeze state onto the EEH core's state bits. */
303 case OPAL_EEH_STOPPED_NOT_FROZEN:
304 result &= ~EEH_STATE_RESET_ACTIVE;
305 result |= EEH_STATE_MMIO_ACTIVE;
306 result |= EEH_STATE_DMA_ACTIVE;
307 result |= EEH_STATE_MMIO_ENABLED;
308 result |= EEH_STATE_DMA_ENABLED;
310 case OPAL_EEH_STOPPED_MMIO_FREEZE:
311 result &= ~EEH_STATE_RESET_ACTIVE;
312 result |= EEH_STATE_DMA_ACTIVE;
313 result |= EEH_STATE_DMA_ENABLED;
315 case OPAL_EEH_STOPPED_DMA_FREEZE:
316 result &= ~EEH_STATE_RESET_ACTIVE;
317 result |= EEH_STATE_MMIO_ACTIVE;
318 result |= EEH_STATE_MMIO_ENABLED;
320 case OPAL_EEH_STOPPED_MMIO_DMA_FREEZE:
321 result &= ~EEH_STATE_RESET_ACTIVE;
323 case OPAL_EEH_STOPPED_RESET:
324 result |= EEH_STATE_RESET_ACTIVE;
326 case OPAL_EEH_STOPPED_TEMP_UNAVAIL:
327 result |= EEH_STATE_UNAVAILABLE;
329 case OPAL_EEH_STOPPED_PERM_UNAVAIL:
330 result |= EEH_STATE_NOT_SUPPORT;
333 pr_warning("%s: Unexpected EEH status 0x%x "
335 __func__, fstate, hose->global_number, pe_no);
338 /* Dump PHB diag-data for frozen PE */
/* Frozen (MMIO or DMA inactive) and not yet isolated: mark + dump once. */
339 if (result != EEH_STATE_NOT_SUPPORT &&
340 (result & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) !=
341 (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE) &&
342 !(pe->state & EEH_PE_ISOLATED)) {
343 eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
344 ioda_eeh_phb_diag(hose);
/*
 * Clear the frozen state on a PE via firmware, then read the freeze
 * status back to verify the clear actually took effect.
 * NOTE(review): extract appears to be missing lines (declarations,
 * return paths) — verify against the full source.
 */
350 static int ioda_eeh_pe_clear(struct eeh_pe *pe)
352 struct pci_controller *hose;
361 phb = pe->phb->private_data;
363 /* Clear the EEH error on the PE */
364 ret = opal_pci_eeh_freeze_clear(phb->opal_id,
365 pe_no, OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
367 pr_err("%s: Failed to clear EEH error for "
368 "PHB#%x-PE#%x, err=%lld\n",
369 __func__, hose->global_number, pe_no, ret);
374 * Read the PE state back and verify that the frozen
375 * state has been removed.
377 ret = opal_pci_eeh_freeze_status(phb->opal_id, pe_no,
378 &fstate, &pcierr, NULL);
380 pr_err("%s: Failed to get EEH status on "
381 "PHB#%x-PE#%x\n, err=%lld\n",
382 __func__, hose->global_number, pe_no, ret);
386 if (fstate != OPAL_EEH_STOPPED_NOT_FROZEN) {
387 pr_err("%s: Frozen state not cleared on "
388 "PHB#%x-PE#%x, sts=%x\n",
389 __func__, hose->global_number, pe_no, fstate);
/*
 * Poll firmware until an outstanding PHB operation (e.g. reset)
 * completes; returns the final OPAL status.
 */
396 static s64 ioda_eeh_phb_poll(struct pnv_phb *phb)
398 s64 rc = OPAL_HARDWARE;
401 rc = opal_pci_poll(phb->opal_id);
/*
 * Issue a complete PHB reset (assert for fundamental/hot, deassert
 * for deactivate) through firmware, then poll until it finishes.
 */
411 static int ioda_eeh_phb_reset(struct pci_controller *hose, int option)
413 struct pnv_phb *phb = hose->private_data;
414 s64 rc = OPAL_HARDWARE;
416 pr_debug("%s: Reset PHB#%x, option=%d\n",
417 __func__, hose->global_number, option);
419 /* Issue PHB complete reset request */
420 if (option == EEH_RESET_FUNDAMENTAL ||
421 option == EEH_RESET_HOT)
422 rc = opal_pci_reset(phb->opal_id,
425 else if (option == EEH_RESET_DEACTIVATE)
426 rc = opal_pci_reset(phb->opal_id,
428 OPAL_DEASSERT_RESET);
433 * Poll state of the PHB until the request is done
436 rc = ioda_eeh_phb_poll(phb);
438 if (rc != OPAL_SUCCESS)
/*
 * Reset the root port of the PHB with the requested scope
 * (fundamental, hot, or deassert), then poll for completion.
 */
444 static int ioda_eeh_root_reset(struct pci_controller *hose, int option)
446 struct pnv_phb *phb = hose->private_data;
447 s64 rc = OPAL_SUCCESS;
449 pr_debug("%s: Reset PHB#%x, option=%d\n",
450 __func__, hose->global_number, option);
453 * During the reset deassert time, we needn't care
454 * the reset scope because the firmware does nothing
455 * for fundamental or hot reset during deassert phase.
457 if (option == EEH_RESET_FUNDAMENTAL)
458 rc = opal_pci_reset(phb->opal_id,
459 OPAL_PCI_FUNDAMENTAL_RESET,
461 else if (option == EEH_RESET_HOT)
462 rc = opal_pci_reset(phb->opal_id,
465 else if (option == EEH_RESET_DEACTIVATE)
466 rc = opal_pci_reset(phb->opal_id,
468 OPAL_DEASSERT_RESET);
472 /* Poll state of the PHB until the request is done */
473 rc = ioda_eeh_phb_poll(phb);
475 if (rc != OPAL_SUCCESS)
/*
 * Reset the secondary bus below a P2P bridge by toggling the
 * Secondary Bus Reset bit in the bridge control register:
 * set it on activate (hot/fundamental), clear it on deactivate.
 */
481 static int ioda_eeh_bridge_reset(struct pci_controller *hose,
482 struct pci_dev *dev, int option)
486 pr_debug("%s: Reset device %04x:%02x:%02x.%01x with option %d\n",
487 __func__, hose->global_number, dev->bus->number,
488 PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn), option);
491 case EEH_RESET_FUNDAMENTAL:
493 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
494 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
495 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
497 case EEH_RESET_DEACTIVATE:
498 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
499 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
500 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
508 * ioda_eeh_reset - Reset the indicated PE
510 * @option: reset option
512 * Do reset on the indicated PE. For PCI bus sensitive PE,
513 * we need to reset the parent p2p bridge. The PHB has to
514 * be reinitialized if the p2p bridge is root bridge. For
515 * PCI device sensitive PE, we will try to reset the device
516 * through FLR. For now, we don't have OPAL APIs to do HARD
517 * reset yet, so all reset would be SOFT (HOT) reset.
519 static int ioda_eeh_reset(struct eeh_pe *pe, int option)
521 struct pci_controller *hose = pe->phb;
526 * Anyway, we have to clear the problematic state for the
527 * corresponding PE. However, we needn't do it if the PE
528 * is PHB associated. That means the PHB is having fatal
529 * errors and it needs reset. Further more, the AIB interface
530 * isn't reliable any more.
532 if (!(pe->type & EEH_PE_PHB) &&
533 (option == EEH_RESET_HOT ||
534 option == EEH_RESET_FUNDAMENTAL)) {
535 ret = ioda_eeh_pe_clear(pe);
541 * The rules applied to reset, either fundamental or hot reset:
543 * We always reset the direct upstream bridge of the PE. If the
544 * direct upstream bridge isn't root bridge, we always take hot
545 * reset no matter what option (fundamental or hot) is. Otherwise,
546 * we should do the reset according to the required option.
548 if (pe->type & EEH_PE_PHB) {
549 ret = ioda_eeh_phb_reset(hose, option);
/* Non-PHB PE: pick root-port vs bridge reset by bus position. */
551 bus = eeh_pe_bus_get(pe);
552 if (pci_is_root_bus(bus))
553 ret = ioda_eeh_root_reset(hose, option);
555 ret = ioda_eeh_bridge_reset(hose, bus->self, option);
562 * ioda_eeh_configure_bridge - Configure the PCI bridges for the indicated PE
565 * For particular PE, it might have included PCI bridges. In order
566 * to make the PE work properly, those PCI bridges should be configured
567 * correctly. However, we need do nothing on P7IOC since the reset
568 * function will do everything that should be covered by the function.
570 static int ioda_eeh_configure_bridge(struct eeh_pe *pe)
/*
 * Dump the GEM and LEM registers that are common to every P7IOC
 * hub diag-data record type.
 */
575 static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data)
578 pr_info(" GEM XFIR: %016llx\n", data->gemXfir);
579 pr_info(" GEM RFIR: %016llx\n", data->gemRfir);
580 pr_info(" GEM RIRQFIR: %016llx\n", data->gemRirqfir);
581 pr_info(" GEM Mask: %016llx\n", data->gemMask);
582 pr_info(" GEM RWOF: %016llx\n", data->gemRwof);
585 pr_info(" LEM FIR: %016llx\n", data->lemFir);
586 pr_info(" LEM Error Mask: %016llx\n", data->lemErrMask);
587 pr_info(" LEM Action 0: %016llx\n", data->lemAction0);
588 pr_info(" LEM Action 1: %016llx\n", data->lemAction1);
589 pr_info(" LEM WOF: %016llx\n", data->lemWof);
592 static void ioda_eeh_hub_diag(struct pci_controller *hose)
594 struct pnv_phb *phb = hose->private_data;
595 struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag;
598 rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
599 if (rc != OPAL_SUCCESS) {
600 pr_warning("%s: Failed to get HUB#%llx diag-data (%ld)\n",
601 __func__, phb->hub_id, rc);
605 switch (data->type) {
606 case OPAL_P7IOC_DIAG_TYPE_RGC:
607 pr_info("P7IOC diag-data for RGC\n\n");
608 ioda_eeh_hub_diag_common(data);
609 pr_info(" RGC Status: %016llx\n", data->rgc.rgcStatus);
610 pr_info(" RGC LDCP: %016llx\n", data->rgc.rgcLdcp);
612 case OPAL_P7IOC_DIAG_TYPE_BI:
613 pr_info("P7IOC diag-data for BI %s\n\n",
614 data->bi.biDownbound ? "Downbound" : "Upbound");
615 ioda_eeh_hub_diag_common(data);
616 pr_info(" BI LDCP 0: %016llx\n", data->bi.biLdcp0);
617 pr_info(" BI LDCP 1: %016llx\n", data->bi.biLdcp1);
618 pr_info(" BI LDCP 2: %016llx\n", data->bi.biLdcp2);
619 pr_info(" BI Fence Status: %016llx\n", data->bi.biFenceStatus);
621 case OPAL_P7IOC_DIAG_TYPE_CI:
622 pr_info("P7IOC diag-data for CI Port %d\\nn",
624 ioda_eeh_hub_diag_common(data);
625 pr_info(" CI Port Status: %016llx\n", data->ci.ciPortStatus);
626 pr_info(" CI Port LDCP: %016llx\n", data->ci.ciPortLdcp);
628 case OPAL_P7IOC_DIAG_TYPE_MISC:
629 pr_info("P7IOC diag-data for MISC\n\n");
630 ioda_eeh_hub_diag_common(data);
632 case OPAL_P7IOC_DIAG_TYPE_I2C:
633 pr_info("P7IOC diag-data for I2C\n\n");
634 ioda_eeh_hub_diag_common(data);
637 pr_warning("%s: Invalid type of HUB#%llx diag-data (%d)\n",
638 __func__, phb->hub_id, data->type);
/*
 * Look up the eeh_pe for (@hose, @pe_no) and return it via *@pe.
 * A dummy eeh_dev keyed by the PE config address is used to drive
 * the generic eeh_pe_get() lookup.
 */
642 static int ioda_eeh_get_pe(struct pci_controller *hose,
643 u16 pe_no, struct eeh_pe **pe)
645 struct eeh_pe *phb_pe, *dev_pe;
648 /* Find the PHB PE */
649 phb_pe = eeh_phb_pe_get(hose);
653 /* Find the PE according to PE# */
654 memset(&dev, 0, sizeof(struct eeh_dev));
656 dev.pe_config_addr = pe_no;
657 dev_pe = eeh_pe_get(&dev);
/* No matching PE registered for this PE number. */
658 if (!dev_pe) return -EEXIST;
665 * ioda_eeh_next_error - Retrieve next error for EEH core to handle
666 * @pe: The affected PE
668 * The function is expected to be called by EEH core while it gets
669 * special EEH event (without binding PE). The function calls to
670 * OPAL APIs for next error to handle. The informational error is
671 * handled internally by platform. However, the dead IOC, dead PHB,
672 * fenced PHB and frozen PE should be handled by EEH core eventually.
674 static int ioda_eeh_next_error(struct eeh_pe **pe)
676 struct pci_controller *hose;
678 struct eeh_pe *phb_pe;
680 u16 err_type, severity;
682 int ret = EEH_NEXT_ERR_NONE;
685 * While running here, it's safe to purge the event queue.
686 * And we should keep the cached OPAL notifier event sychronized
687 * between the kernel and firmware.
689 eeh_remove_event(NULL);
690 opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);
/* Walk every PHB and ask firmware for its next pending error. */
692 list_for_each_entry(hose, &hose_list, list_node) {
694 * If the subordinate PCI buses of the PHB has been
695 * removed or is exactly under error recovery, we
696 * needn't take care of it any more.
698 phb = hose->private_data;
699 phb_pe = eeh_phb_pe_get(hose);
700 if (!phb_pe || (phb_pe->state & EEH_PE_ISOLATED))
703 rc = opal_pci_next_error(phb->opal_id,
704 &frozen_pe_no, &err_type, &severity);
706 /* If OPAL API returns error, we needn't proceed */
707 if (rc != OPAL_SUCCESS) {
708 pr_devel("%s: Invalid return value on "
709 "PHB#%x (0x%lx) from opal_pci_next_error",
710 __func__, hose->global_number, rc);
714 /* If the PHB doesn't have error, stop processing */
715 if (err_type == OPAL_EEH_NO_ERROR ||
716 severity == OPAL_EEH_SEV_NO_ERROR) {
717 pr_devel("%s: No error found on PHB#%x\n",
718 __func__, hose->global_number);
723 * Processing the error. We're expecting the error with
724 * highest priority reported upon multiple errors on the
727 pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n",
728 __func__, err_type, severity,
729 frozen_pe_no, hose->global_number);
/* IOC error: dead IOC escalates; informative ones just dump diag. */
731 case OPAL_EEH_IOC_ERROR:
732 if (severity == OPAL_EEH_SEV_IOC_DEAD) {
733 pr_err("EEH: dead IOC detected\n");
734 ret = EEH_NEXT_ERR_DEAD_IOC;
735 } else if (severity == OPAL_EEH_SEV_INF) {
736 pr_info("EEH: IOC informative error "
738 ioda_eeh_hub_diag(hose);
739 ret = EEH_NEXT_ERR_NONE;
/* PHB error: dead/fenced escalate; informative dumps diag only. */
743 case OPAL_EEH_PHB_ERROR:
744 if (severity == OPAL_EEH_SEV_PHB_DEAD) {
746 pr_err("EEH: dead PHB#%x detected\n",
747 hose->global_number);
748 ret = EEH_NEXT_ERR_DEAD_PHB;
749 } else if (severity == OPAL_EEH_SEV_PHB_FENCED) {
751 pr_err("EEH: fenced PHB#%x detected\n",
752 hose->global_number);
753 ret = EEH_NEXT_ERR_FENCED_PHB;
754 } else if (severity == OPAL_EEH_SEV_INF) {
755 pr_info("EEH: PHB#%x informative error "
757 hose->global_number);
758 ioda_eeh_phb_diag(hose);
759 ret = EEH_NEXT_ERR_NONE;
763 case OPAL_EEH_PE_ERROR:
765 * If we can't find the corresponding PE, the
766 * PEEV / PEST would be messy. So we force an
767 * fenced PHB so that it can be recovered.
769 if (ioda_eeh_get_pe(hose, frozen_pe_no, pe)) {
771 pr_err("EEH: Escalated fenced PHB#%x "
772 "detected for PE#%llx\n",
775 ret = EEH_NEXT_ERR_FENCED_PHB;
777 pr_err("EEH: Frozen PE#%x on PHB#%x detected\n",
778 (*pe)->addr, (*pe)->phb->global_number);
779 ret = EEH_NEXT_ERR_FROZEN_PE;
784 pr_warn("%s: Unexpected error type %d\n",
789 * EEH core will try recover from fenced PHB or
790 * frozen PE. In the time for frozen PE, EEH core
791 * enable IO path for that before collecting logs,
792 * but it ruins the site. So we have to dump the
793 * log in advance here.
795 if ((ret == EEH_NEXT_ERR_FROZEN_PE ||
796 ret == EEH_NEXT_ERR_FENCED_PHB) &&
797 !((*pe)->state & EEH_PE_ISOLATED)) {
798 eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
799 ioda_eeh_phb_diag(hose);
803 * If we have no errors on the specific PHB or only
804 * informative error there, we continue poking it.
805 * Otherwise, we need actions to be taken by upper
808 if (ret > EEH_NEXT_ERR_INF)
/* EEH backend operations exported for IODA-compliant PHBs. */
815 struct pnv_eeh_ops ioda_eeh_ops = {
816 .post_init = ioda_eeh_post_init,
817 .set_option = ioda_eeh_set_option,
818 .get_state = ioda_eeh_get_state,
819 .reset = ioda_eeh_reset,
820 .configure_bridge = ioda_eeh_configure_bridge,
821 .next_error = ioda_eeh_next_error