powerpc/pci: Mask linkDown on resetting PCI bus
arch/powerpc/platforms/powernv/eeh-ioda.c
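
The change below suppresses the expected linkDown error while a PCI secondary bus is being reset: ioda_eeh_bridge_reset() sets PCI_ERR_UNC_SURPDN in the bridge's AER uncorrectable-error mask before asserting PCI_BRIDGE_CTL_BUS_RESET, and clears it again once the reset has been deasserted and the bus has settled. Below is a minimal sketch of the same sequence using the generic PCI config accessors rather than this file's eeh_ops-based accessors; the function name is hypothetical and the delay values are placeholders (the file itself uses EEH_PE_RST_HOLD_TIME and EEH_PE_RST_SETTLE_TIME).

	#include <linux/delay.h>
	#include <linux/pci.h>

	/* Illustrative sketch only, not part of the file below */
	static void example_masked_secondary_bus_reset(struct pci_dev *bridge)
	{
		int aer = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ERR);
		u32 mask = 0;
		u16 ctrl;

		/* Mask Surprise Down so the expected link drop isn't reported */
		if (aer) {
			pci_read_config_dword(bridge, aer + PCI_ERR_UNCOR_MASK, &mask);
			pci_write_config_dword(bridge, aer + PCI_ERR_UNCOR_MASK,
					       mask | PCI_ERR_UNC_SURPDN);
		}

		/* Assert, hold, then deassert the secondary bus reset */
		pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &ctrl);
		pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
				      ctrl | PCI_BRIDGE_CTL_BUS_RESET);
		msleep(100);		/* placeholder hold time */
		pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
				      ctrl & ~PCI_BRIDGE_CTL_BUS_RESET);
		msleep(1000);		/* placeholder settle time */

		/* Restore the original mask so linkDown is reported again */
		if (aer)
			pci_write_config_dword(bridge, aer + PCI_ERR_UNCOR_MASK, mask);
	}
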
/*
 * This file implements the functions needed by EEH on IODA-compliant
 * chips. Most of the EEH support here is built on top of the OPAL APIs.
 *
 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2013.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/string.h>

#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/io.h>
#include <asm/iommu.h>
#include <asm/msi_bitmap.h>
#include <asm/opal.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>
#include <asm/tce.h>

#include "powernv.h"
#include "pci.h"

static int ioda_eeh_nb_init = 0;

static int ioda_eeh_event(struct notifier_block *nb,
			  unsigned long events, void *change)
{
	uint64_t changed_evts = (uint64_t)change;

	/*
	 * We simply send a special EEH event if EEH has
	 * been enabled, or clear pending events in case
	 * we enable EEH soon.
	 */
	if (!(changed_evts & OPAL_EVENT_PCI_ERROR) ||
	    !(events & OPAL_EVENT_PCI_ERROR))
		return 0;

	if (eeh_enabled())
		eeh_send_failure_event(NULL);
	else
		opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);

	return 0;
}

static struct notifier_block ioda_eeh_nb = {
	.notifier_call	= ioda_eeh_event,
	.next		= NULL,
	.priority	= 0
};

#ifdef CONFIG_DEBUG_FS
static int ioda_eeh_dbgfs_set(void *data, int offset, u64 val)
{
	struct pci_controller *hose = data;
	struct pnv_phb *phb = hose->private_data;

	out_be64(phb->regs + offset, val);
	return 0;
}

static int ioda_eeh_dbgfs_get(void *data, int offset, u64 *val)
{
	struct pci_controller *hose = data;
	struct pnv_phb *phb = hose->private_data;

	*val = in_be64(phb->regs + offset);
	return 0;
}

static int ioda_eeh_outb_dbgfs_set(void *data, u64 val)
{
	return ioda_eeh_dbgfs_set(data, 0xD10, val);
}

static int ioda_eeh_outb_dbgfs_get(void *data, u64 *val)
{
	return ioda_eeh_dbgfs_get(data, 0xD10, val);
}

static int ioda_eeh_inbA_dbgfs_set(void *data, u64 val)
{
	return ioda_eeh_dbgfs_set(data, 0xD90, val);
}

static int ioda_eeh_inbA_dbgfs_get(void *data, u64 *val)
{
	return ioda_eeh_dbgfs_get(data, 0xD90, val);
}

static int ioda_eeh_inbB_dbgfs_set(void *data, u64 val)
{
	return ioda_eeh_dbgfs_set(data, 0xE10, val);
}

static int ioda_eeh_inbB_dbgfs_get(void *data, u64 *val)
{
	return ioda_eeh_dbgfs_get(data, 0xE10, val);
}

DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_outb_dbgfs_ops, ioda_eeh_outb_dbgfs_get,
			ioda_eeh_outb_dbgfs_set, "0x%llx\n");
DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbA_dbgfs_ops, ioda_eeh_inbA_dbgfs_get,
			ioda_eeh_inbA_dbgfs_set, "0x%llx\n");
DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbB_dbgfs_ops, ioda_eeh_inbB_dbgfs_get,
			ioda_eeh_inbB_dbgfs_set, "0x%llx\n");
#endif /* CONFIG_DEBUG_FS */

/**
 * ioda_eeh_post_init - Chip dependent post initialization
 * @hose: PCI controller
 *
 * The function will be called after EEH PEs and devices
 * have been built. That means EEH is ready to provide
 * service, with the I/O address cache in place.
 */
static int ioda_eeh_post_init(struct pci_controller *hose)
{
	struct pnv_phb *phb = hose->private_data;
	int ret;

	/* Register OPAL event notifier */
	if (!ioda_eeh_nb_init) {
		ret = opal_notifier_register(&ioda_eeh_nb);
		if (ret) {
			pr_err("%s: Can't register OPAL event notifier (%d)\n",
			       __func__, ret);
			return ret;
		}

		ioda_eeh_nb_init = 1;
	}

#ifdef CONFIG_DEBUG_FS
	if (!phb->has_dbgfs && phb->dbgfs) {
		phb->has_dbgfs = 1;

		debugfs_create_file("err_injct_outbound", 0600,
				    phb->dbgfs, hose,
				    &ioda_eeh_outb_dbgfs_ops);
		debugfs_create_file("err_injct_inboundA", 0600,
				    phb->dbgfs, hose,
				    &ioda_eeh_inbA_dbgfs_ops);
		debugfs_create_file("err_injct_inboundB", 0600,
				    phb->dbgfs, hose,
				    &ioda_eeh_inbB_dbgfs_ops);
	}
#endif

	/*
	 * If EEH is enabled, we're going to rely on that.
	 * Otherwise, we fall back to the conventional mechanism
	 * of clearing frozen PEs during PCI config accesses.
	 */
	if (eeh_enabled())
		phb->flags |= PNV_PHB_FLAG_EEH;
	else
		phb->flags &= ~PNV_PHB_FLAG_EEH;

	return 0;
}

/**
 * ioda_eeh_set_option - Set EEH operation or I/O setting
 * @pe: EEH PE
 * @option: options
 *
 * Enable or disable EEH option for the indicated PE. The
 * function can also be used to enable I/O or DMA for the
 * PE.
 */
static int ioda_eeh_set_option(struct eeh_pe *pe, int option)
{
	s64 ret;
	u32 pe_no;
	struct pci_controller *hose = pe->phb;
	struct pnv_phb *phb = hose->private_data;

	/* Check on PE number */
	if (pe->addr < 0 || pe->addr >= phb->ioda.total_pe) {
		pr_err("%s: PE address %x out of range [0, %x] "
		       "on PHB#%x\n",
		       __func__, pe->addr, phb->ioda.total_pe,
		       hose->global_number);
		return -EINVAL;
	}

	pe_no = pe->addr;
	switch (option) {
	case EEH_OPT_DISABLE:
		ret = -EEXIST;
		break;
	case EEH_OPT_ENABLE:
		ret = 0;
		break;
	case EEH_OPT_THAW_MMIO:
		ret = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
				OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO);
		if (ret) {
			pr_warn("%s: Failed to enable MMIO for "
				"PHB#%x-PE#%x, err=%lld\n",
				__func__, hose->global_number, pe_no, ret);
			return -EIO;
		}

		break;
	case EEH_OPT_THAW_DMA:
		ret = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
				OPAL_EEH_ACTION_CLEAR_FREEZE_DMA);
		if (ret) {
			pr_warn("%s: Failed to enable DMA for "
				"PHB#%x-PE#%x, err=%lld\n",
				__func__, hose->global_number, pe_no, ret);
			return -EIO;
		}

		break;
	default:
		pr_warn("%s: Invalid option %d\n", __func__, option);
		return -EINVAL;
	}

	return ret;
}

static void ioda_eeh_phb_diag(struct pci_controller *hose)
{
	struct pnv_phb *phb = hose->private_data;
	long rc;

	rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
					 PNV_PCI_DIAG_BUF_SIZE);
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failed to get diag-data for PHB#%x (%ld)\n",
			__func__, hose->global_number, rc);
		return;
	}

	pnv_pci_dump_phb_diag_data(hose, phb->diag.blob);
}

/**
 * ioda_eeh_get_state - Retrieve the state of PE
 * @pe: EEH PE
 *
 * The PE's state should be retrieved from the PEEV and PEST
 * IODA tables. Since OPAL exports a function to do that, we
 * use it.
 */
static int ioda_eeh_get_state(struct eeh_pe *pe)
{
	s64 ret = 0;
	u8 fstate;
	u16 pcierr;
	u32 pe_no;
	int result;
	struct pci_controller *hose = pe->phb;
	struct pnv_phb *phb = hose->private_data;

	/*
	 * Sanity check on PE address. The PHB PE address should
	 * be zero.
	 */
	if (pe->addr < 0 || pe->addr >= phb->ioda.total_pe) {
		pr_err("%s: PE address %x out of range [0, %x] "
		       "on PHB#%x\n",
		       __func__, pe->addr, phb->ioda.total_pe,
		       hose->global_number);
		return EEH_STATE_NOT_SUPPORT;
	}

	/*
	 * If we're in the middle of a PE reset, return a normal
	 * state to keep the EEH core going. For a PHB reset, we
	 * still expect the fenced PHB to be cleared by the reset
	 * itself.
	 */
	if (!(pe->type & EEH_PE_PHB) &&
	    (pe->state & EEH_PE_RESET)) {
		result = (EEH_STATE_MMIO_ACTIVE |
			  EEH_STATE_DMA_ACTIVE |
			  EEH_STATE_MMIO_ENABLED |
			  EEH_STATE_DMA_ENABLED);
		return result;
	}

	/* Retrieve PE status through OPAL */
	pe_no = pe->addr;
	ret = opal_pci_eeh_freeze_status(phb->opal_id, pe_no,
			&fstate, &pcierr, NULL);
	if (ret) {
		pr_err("%s: Failed to get EEH status on "
		       "PHB#%x-PE#%x, err=%lld\n",
		       __func__, hose->global_number, pe_no, ret);
		return EEH_STATE_NOT_SUPPORT;
	}

	/* Check PHB status */
	if (pe->type & EEH_PE_PHB) {
		result = 0;
		result &= ~EEH_STATE_RESET_ACTIVE;

		if (pcierr != OPAL_EEH_PHB_ERROR) {
			result |= EEH_STATE_MMIO_ACTIVE;
			result |= EEH_STATE_DMA_ACTIVE;
			result |= EEH_STATE_MMIO_ENABLED;
			result |= EEH_STATE_DMA_ENABLED;
		} else if (!(pe->state & EEH_PE_ISOLATED)) {
			eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
			ioda_eeh_phb_diag(hose);
		}

		return result;
	}

	/* Parse result out */
	result = 0;
	switch (fstate) {
	case OPAL_EEH_STOPPED_NOT_FROZEN:
		result &= ~EEH_STATE_RESET_ACTIVE;
		result |= EEH_STATE_MMIO_ACTIVE;
		result |= EEH_STATE_DMA_ACTIVE;
		result |= EEH_STATE_MMIO_ENABLED;
		result |= EEH_STATE_DMA_ENABLED;
		break;
	case OPAL_EEH_STOPPED_MMIO_FREEZE:
		result &= ~EEH_STATE_RESET_ACTIVE;
		result |= EEH_STATE_DMA_ACTIVE;
		result |= EEH_STATE_DMA_ENABLED;
		break;
	case OPAL_EEH_STOPPED_DMA_FREEZE:
		result &= ~EEH_STATE_RESET_ACTIVE;
		result |= EEH_STATE_MMIO_ACTIVE;
		result |= EEH_STATE_MMIO_ENABLED;
		break;
	case OPAL_EEH_STOPPED_MMIO_DMA_FREEZE:
		result &= ~EEH_STATE_RESET_ACTIVE;
		break;
	case OPAL_EEH_STOPPED_RESET:
		result |= EEH_STATE_RESET_ACTIVE;
		break;
	case OPAL_EEH_STOPPED_TEMP_UNAVAIL:
		result |= EEH_STATE_UNAVAILABLE;
		break;
	case OPAL_EEH_STOPPED_PERM_UNAVAIL:
		result |= EEH_STATE_NOT_SUPPORT;
		break;
	default:
		pr_warn("%s: Unexpected EEH status 0x%x "
			"on PHB#%x-PE#%x\n",
			__func__, fstate, hose->global_number, pe_no);
	}

	/* Dump PHB diag-data for frozen PE */
	if (result != EEH_STATE_NOT_SUPPORT &&
	    (result & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) !=
	    (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE) &&
	    !(pe->state & EEH_PE_ISOLATED)) {
		eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
		ioda_eeh_phb_diag(hose);
	}

	return result;
}

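/*
 * Helper used by the reset paths below: keep calling opal_pci_poll()
 * until it returns zero (done) or a negative error code. A positive
 * return value is treated as a delay hint, in milliseconds, before
 * polling again (hence the msleep(rc)).
 */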
static s64 ioda_eeh_phb_poll(struct pnv_phb *phb)
{
	s64 rc = OPAL_HARDWARE;

	while (1) {
		rc = opal_pci_poll(phb->opal_id);
		if (rc <= 0)
			break;

		msleep(rc);
	}

	return rc;
}

static int ioda_eeh_phb_reset(struct pci_controller *hose, int option)
{
	struct pnv_phb *phb = hose->private_data;
	s64 rc = OPAL_HARDWARE;

	pr_debug("%s: Reset PHB#%x, option=%d\n",
		 __func__, hose->global_number, option);

	/* Issue PHB complete reset request */
	if (option == EEH_RESET_FUNDAMENTAL ||
	    option == EEH_RESET_HOT)
		rc = opal_pci_reset(phb->opal_id,
				OPAL_PHB_COMPLETE,
				OPAL_ASSERT_RESET);
	else if (option == EEH_RESET_DEACTIVATE)
		rc = opal_pci_reset(phb->opal_id,
				OPAL_PHB_COMPLETE,
				OPAL_DEASSERT_RESET);
	if (rc < 0)
		goto out;

	/*
	 * Poll the state of the PHB until the request has completed.
	 * The PHB reset is usually a PHB complete reset followed by
	 * a hot reset on the root bus, so we also need the PCI bus
	 * settlement delay.
	 */
	rc = ioda_eeh_phb_poll(phb);
	if (option == EEH_RESET_DEACTIVATE)
		msleep(EEH_PE_RST_SETTLE_TIME);
out:
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}

static int ioda_eeh_root_reset(struct pci_controller *hose, int option)
{
	struct pnv_phb *phb = hose->private_data;
	s64 rc = OPAL_SUCCESS;

	pr_debug("%s: Reset PHB#%x, option=%d\n",
		 __func__, hose->global_number, option);

	/*
	 * During reset deassert, we needn't care about the reset
	 * scope because the firmware does nothing differently for
	 * a fundamental or hot reset during the deassert phase.
	 */
	if (option == EEH_RESET_FUNDAMENTAL)
		rc = opal_pci_reset(phb->opal_id,
				OPAL_PCI_FUNDAMENTAL_RESET,
				OPAL_ASSERT_RESET);
	else if (option == EEH_RESET_HOT)
		rc = opal_pci_reset(phb->opal_id,
				OPAL_PCI_HOT_RESET,
				OPAL_ASSERT_RESET);
	else if (option == EEH_RESET_DEACTIVATE)
		rc = opal_pci_reset(phb->opal_id,
				OPAL_PCI_HOT_RESET,
				OPAL_DEASSERT_RESET);
	if (rc < 0)
		goto out;

	/* Poll the state of the PHB until the request is done */
	rc = ioda_eeh_phb_poll(phb);
	if (option == EEH_RESET_DEACTIVATE)
		msleep(EEH_PE_RST_SETTLE_TIME);
out:
	if (rc != OPAL_SUCCESS)
		return -EIO;

	return 0;
}

static int ioda_eeh_bridge_reset(struct pci_dev *dev, int option)
{
	struct device_node *dn = pci_device_to_OF_node(dev);
	struct eeh_dev *edev = of_node_to_eeh_dev(dn);
	int aer = edev ? edev->aer_cap : 0;
	u32 ctrl;

	pr_debug("%s: Reset PCI bus %04x:%02x with option %d\n",
		 __func__, pci_domain_nr(dev->bus),
		 dev->bus->number, option);

	switch (option) {
	case EEH_RESET_FUNDAMENTAL:
	case EEH_RESET_HOT:
		/* Don't report linkDown event */
		if (aer) {
			eeh_ops->read_config(dn, aer + PCI_ERR_UNCOR_MASK,
					     4, &ctrl);
			ctrl |= PCI_ERR_UNC_SURPDN;
			eeh_ops->write_config(dn, aer + PCI_ERR_UNCOR_MASK,
					      4, ctrl);
		}

		eeh_ops->read_config(dn, PCI_BRIDGE_CONTROL, 2, &ctrl);
		ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
		eeh_ops->write_config(dn, PCI_BRIDGE_CONTROL, 2, ctrl);
		msleep(EEH_PE_RST_HOLD_TIME);

		break;
	case EEH_RESET_DEACTIVATE:
		eeh_ops->read_config(dn, PCI_BRIDGE_CONTROL, 2, &ctrl);
		ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
		eeh_ops->write_config(dn, PCI_BRIDGE_CONTROL, 2, ctrl);
		msleep(EEH_PE_RST_SETTLE_TIME);

		/* Continue reporting linkDown event */
		if (aer) {
			eeh_ops->read_config(dn, aer + PCI_ERR_UNCOR_MASK,
					     4, &ctrl);
			ctrl &= ~PCI_ERR_UNC_SURPDN;
			eeh_ops->write_config(dn, aer + PCI_ERR_UNCOR_MASK,
					      4, ctrl);
		}

		break;
	}

	return 0;
}

void pnv_pci_reset_secondary_bus(struct pci_dev *dev)
{
	struct pci_controller *hose;

	if (pci_is_root_bus(dev->bus)) {
		hose = pci_bus_to_host(dev->bus);
		ioda_eeh_root_reset(hose, EEH_RESET_HOT);
		ioda_eeh_root_reset(hose, EEH_RESET_DEACTIVATE);
	} else {
		ioda_eeh_bridge_reset(dev, EEH_RESET_HOT);
		ioda_eeh_bridge_reset(dev, EEH_RESET_DEACTIVATE);
	}
}

/**
 * ioda_eeh_reset - Reset the indicated PE
 * @pe: EEH PE
 * @option: reset option
 *
 * Reset the indicated PE. For a PCI-bus-sensitive PE, we need
 * to reset the parent p2p bridge; the PHB has to be reinitialized
 * if that bridge is the root bridge. For a PCI-device-sensitive
 * PE, we will try to reset the device through FLR. For now, we
 * don't have OPAL APIs to do a HARD reset yet, so all resets are
 * SOFT (HOT) resets.
 */
static int ioda_eeh_reset(struct eeh_pe *pe, int option)
{
	struct pci_controller *hose = pe->phb;
	struct pci_bus *bus;
	int ret;

	/*
	 * For a PHB reset, we always do a complete reset. For PEs whose
	 * primary bus derives from the root complex (root bus) or the root
	 * port (usually bus#1), we apply a hot or fundamental reset on the
	 * root port. For other PEs, we always do a hot reset on the PE's
	 * primary bus.
	 *
	 * Here we take a different approach from pHyp, which always clears
	 * the frozen state during PE reset. The idea (from benh) is to keep
	 * the frozen state until the PE reset is completely done (until BAR
	 * restore). With the frozen state set, the HW drops illegal IO or
	 * MMIO accesses, which could otherwise cause recursive PE freezes
	 * during the reset. The side effect is that the EEH core has to
	 * clear the frozen state explicitly after BAR restore.
	 */
	if (pe->type & EEH_PE_PHB) {
		ret = ioda_eeh_phb_reset(hose, option);
	} else {
		bus = eeh_pe_bus_get(pe);
		if (pci_is_root_bus(bus) ||
		    pci_is_root_bus(bus->parent))
			ret = ioda_eeh_root_reset(hose, option);
		else
			ret = ioda_eeh_bridge_reset(bus->self, option);
	}

	return ret;
}

/**
 * ioda_eeh_configure_bridge - Configure the PCI bridges for the indicated PE
 * @pe: EEH PE
 *
 * A particular PE might include PCI bridges. In order to make the
 * PE work properly, those PCI bridges should be configured correctly.
 * However, we need to do nothing on P7IOC since the reset function
 * already covers everything this function would have to do.
 */
static int ioda_eeh_configure_bridge(struct eeh_pe *pe)
{
	return 0;
}

static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data)
{
	/* GEM */
	pr_info("  GEM XFIR:        %016llx\n", data->gemXfir);
	pr_info("  GEM RFIR:        %016llx\n", data->gemRfir);
	pr_info("  GEM RIRQFIR:     %016llx\n", data->gemRirqfir);
	pr_info("  GEM Mask:        %016llx\n", data->gemMask);
	pr_info("  GEM RWOF:        %016llx\n", data->gemRwof);

	/* LEM */
	pr_info("  LEM FIR:         %016llx\n", data->lemFir);
	pr_info("  LEM Error Mask:  %016llx\n", data->lemErrMask);
	pr_info("  LEM Action 0:    %016llx\n", data->lemAction0);
	pr_info("  LEM Action 1:    %016llx\n", data->lemAction1);
	pr_info("  LEM WOF:         %016llx\n", data->lemWof);
}

static void ioda_eeh_hub_diag(struct pci_controller *hose)
{
	struct pnv_phb *phb = hose->private_data;
	struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag;
	long rc;

	rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
	if (rc != OPAL_SUCCESS) {
		pr_warn("%s: Failed to get HUB#%llx diag-data (%ld)\n",
			__func__, phb->hub_id, rc);
		return;
	}

	switch (data->type) {
	case OPAL_P7IOC_DIAG_TYPE_RGC:
		pr_info("P7IOC diag-data for RGC\n\n");
		ioda_eeh_hub_diag_common(data);
		pr_info("  RGC Status:      %016llx\n", data->rgc.rgcStatus);
		pr_info("  RGC LDCP:        %016llx\n", data->rgc.rgcLdcp);
		break;
	case OPAL_P7IOC_DIAG_TYPE_BI:
		pr_info("P7IOC diag-data for BI %s\n\n",
			data->bi.biDownbound ? "Downbound" : "Upbound");
		ioda_eeh_hub_diag_common(data);
		pr_info("  BI LDCP 0:       %016llx\n", data->bi.biLdcp0);
		pr_info("  BI LDCP 1:       %016llx\n", data->bi.biLdcp1);
		pr_info("  BI LDCP 2:       %016llx\n", data->bi.biLdcp2);
		pr_info("  BI Fence Status: %016llx\n", data->bi.biFenceStatus);
		break;
	case OPAL_P7IOC_DIAG_TYPE_CI:
		pr_info("P7IOC diag-data for CI Port %d\n\n",
			data->ci.ciPort);
		ioda_eeh_hub_diag_common(data);
		pr_info("  CI Port Status:  %016llx\n", data->ci.ciPortStatus);
		pr_info("  CI Port LDCP:    %016llx\n", data->ci.ciPortLdcp);
		break;
	case OPAL_P7IOC_DIAG_TYPE_MISC:
		pr_info("P7IOC diag-data for MISC\n\n");
		ioda_eeh_hub_diag_common(data);
		break;
	case OPAL_P7IOC_DIAG_TYPE_I2C:
		pr_info("P7IOC diag-data for I2C\n\n");
		ioda_eeh_hub_diag_common(data);
		break;
	default:
		pr_warn("%s: Invalid type of HUB#%llx diag-data (%d)\n",
			__func__, phb->hub_id, data->type);
	}
}

static int ioda_eeh_get_pe(struct pci_controller *hose,
			   u16 pe_no, struct eeh_pe **pe)
{
	struct eeh_pe *phb_pe, *dev_pe;
	struct eeh_dev dev;

	/* Find the PHB PE */
	phb_pe = eeh_phb_pe_get(hose);
	if (!phb_pe)
		return -EEXIST;

	/* Find the PE according to PE# */
	memset(&dev, 0, sizeof(struct eeh_dev));
	dev.phb = hose;
	dev.pe_config_addr = pe_no;
	dev_pe = eeh_pe_get(&dev);
	if (!dev_pe)
		return -EEXIST;

	*pe = dev_pe;
	return 0;
}

/**
 * ioda_eeh_next_error - Retrieve next error for EEH core to handle
 * @pe: The affected PE
 *
 * The function is expected to be called by the EEH core when it
 * receives a special EEH event (one without a bound PE). The function
 * calls OPAL APIs for the next error to handle. Informational errors
 * are handled internally by the platform. However, a dead IOC, dead
 * PHB, fenced PHB or frozen PE must eventually be handled by the
 * EEH core.
 */
static int ioda_eeh_next_error(struct eeh_pe **pe)
{
	struct pci_controller *hose;
	struct pnv_phb *phb;
	struct eeh_pe *phb_pe;
	u64 frozen_pe_no;
	u16 err_type, severity;
	long rc;
	int ret = EEH_NEXT_ERR_NONE;

	/*
	 * While running here, it's safe to purge the event queue.
	 * We should also keep the cached OPAL notifier event
	 * synchronized between the kernel and firmware.
	 */
	eeh_remove_event(NULL);
	opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);

	list_for_each_entry(hose, &hose_list, list_node) {
		/*
		 * If the subordinate PCI buses of the PHB have been
		 * removed or are already undergoing error recovery,
		 * we needn't take care of it any more.
		 */
		phb = hose->private_data;
		phb_pe = eeh_phb_pe_get(hose);
		if (!phb_pe || (phb_pe->state & EEH_PE_ISOLATED))
			continue;

		rc = opal_pci_next_error(phb->opal_id,
				&frozen_pe_no, &err_type, &severity);

		/* If the OPAL API returns error, we needn't proceed */
		if (rc != OPAL_SUCCESS) {
			pr_devel("%s: Invalid return value on "
				 "PHB#%x (0x%lx) from opal_pci_next_error\n",
				 __func__, hose->global_number, rc);
			continue;
		}

		/* If the PHB doesn't have error, stop processing */
		if (err_type == OPAL_EEH_NO_ERROR ||
		    severity == OPAL_EEH_SEV_NO_ERROR) {
			pr_devel("%s: No error found on PHB#%x\n",
				 __func__, hose->global_number);
			continue;
		}

		/*
		 * Process the error. When there are multiple errors on
		 * a specific PHB, we expect the one with the highest
		 * priority to be reported.
		 */
		pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n",
			 __func__, err_type, severity,
			 frozen_pe_no, hose->global_number);
		switch (err_type) {
		case OPAL_EEH_IOC_ERROR:
			if (severity == OPAL_EEH_SEV_IOC_DEAD) {
				pr_err("EEH: dead IOC detected\n");
				ret = EEH_NEXT_ERR_DEAD_IOC;
			} else if (severity == OPAL_EEH_SEV_INF) {
				pr_info("EEH: IOC informative error "
					"detected\n");
				ioda_eeh_hub_diag(hose);
				ret = EEH_NEXT_ERR_NONE;
			}

			break;
		case OPAL_EEH_PHB_ERROR:
			if (severity == OPAL_EEH_SEV_PHB_DEAD) {
				*pe = phb_pe;
				pr_err("EEH: dead PHB#%x detected\n",
					hose->global_number);
				ret = EEH_NEXT_ERR_DEAD_PHB;
			} else if (severity == OPAL_EEH_SEV_PHB_FENCED) {
				*pe = phb_pe;
				pr_err("EEH: fenced PHB#%x detected\n",
					hose->global_number);
				ret = EEH_NEXT_ERR_FENCED_PHB;
			} else if (severity == OPAL_EEH_SEV_INF) {
				pr_info("EEH: PHB#%x informative error "
					"detected\n",
					hose->global_number);
				ioda_eeh_phb_diag(hose);
				ret = EEH_NEXT_ERR_NONE;
			}

			break;
		case OPAL_EEH_PE_ERROR:
			/*
			 * If we can't find the corresponding PE, the
			 * PEEV / PEST would be messy. So we force a
			 * fenced PHB so that it can be recovered.
			 *
			 * If the PE has been marked as isolated, it
			 * either has been removed permanently or is in
			 * the process of recovery. We needn't report
			 * it again.
			 */
			if (ioda_eeh_get_pe(hose, frozen_pe_no, pe)) {
				*pe = phb_pe;
				pr_err("EEH: Escalated fenced PHB#%x "
				       "detected for PE#%llx\n",
					hose->global_number,
					frozen_pe_no);
				ret = EEH_NEXT_ERR_FENCED_PHB;
			} else if ((*pe)->state & EEH_PE_ISOLATED) {
				ret = EEH_NEXT_ERR_NONE;
			} else {
				pr_err("EEH: Frozen PE#%x on PHB#%x detected\n",
					(*pe)->addr, (*pe)->phb->global_number);
				ret = EEH_NEXT_ERR_FROZEN_PE;
			}

			break;
		default:
			pr_warn("%s: Unexpected error type %d\n",
				__func__, err_type);
		}

		/*
		 * The EEH core will try to recover from a fenced PHB or
		 * frozen PE. For a frozen PE, the EEH core enables the
		 * IO path before collecting logs, which disturbs the
		 * error site. So we have to dump the log in advance
		 * here.
		 */
		if ((ret == EEH_NEXT_ERR_FROZEN_PE ||
		    ret == EEH_NEXT_ERR_FENCED_PHB) &&
		    !((*pe)->state & EEH_PE_ISOLATED)) {
			eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
			ioda_eeh_phb_diag(hose);
		}

		/*
		 * If we have no errors on the specific PHB, or only an
		 * informative error there, we continue poking it.
		 * Otherwise, actions need to be taken by the upper
		 * layer.
		 */
		if (ret > EEH_NEXT_ERR_INF)
			break;
	}

	return ret;
}

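/*
 * A hedged sketch (not part of the original file) of how a consumer
 * such as the EEH core might drive the next_error() callback above.
 * The function name, the loop structure and the comments are
 * illustrative assumptions; the real handling lives in the EEH core.
 */
static void __maybe_unused ioda_eeh_next_error_sketch(void)
{
	struct eeh_pe *pe = NULL;
	int rc;

	do {
		/* Fetch the next pending error, if any */
		rc = eeh_ops->next_error(&pe);

		switch (rc) {
		case EEH_NEXT_ERR_FROZEN_PE:
		case EEH_NEXT_ERR_FENCED_PHB:
		case EEH_NEXT_ERR_DEAD_PHB:
			/* Hand the affected PE to the recovery path */
			break;
		case EEH_NEXT_ERR_DEAD_IOC:
			/* Every PHB under the IOC needs recovery */
			break;
		case EEH_NEXT_ERR_NONE:
		default:
			break;
		}
	} while (rc != EEH_NEXT_ERR_NONE);
}
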
struct pnv_eeh_ops ioda_eeh_ops = {
	.post_init		= ioda_eeh_post_init,
	.set_option		= ioda_eeh_set_option,
	.get_state		= ioda_eeh_get_state,
	.reset			= ioda_eeh_reset,
	.configure_bridge	= ioda_eeh_configure_bridge,
	.next_error		= ioda_eeh_next_error
};