/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Currently supports only P5IOC2
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
#include <linux/iommu.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/opal.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/firmware.h>
#include <asm/eeh_event.h>
#include <asm/eeh.h>

#include "powernv.h"
#include "pci.h"

#define PCI_RESET_DELAY_US	3000000

#define cfg_dbg(fmt...)	do { } while(0)
//#define cfg_dbg(fmt...)	printk(fmt)
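
/*
 * MSI/MSI-X support.
 *
 * Hardware interrupt numbers are allocated from the PHB's MSI bitmap
 * and then handed to the per-PHB msi_setup() backend, which programs
 * the MSI address/data pair through OPAL.
 */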
#ifdef CONFIG_PCI_MSI
static int pnv_msi_check_device(struct pci_dev *pdev, int nvec, int type)
{
        struct pci_controller *hose = pci_bus_to_host(pdev->bus);
        struct pnv_phb *phb = hose->private_data;
        struct pci_dn *pdn = pci_get_pdn(pdev);

        if (pdn && pdn->force_32bit_msi && !phb->msi32_support)
                return -ENODEV;

        return (phb && phb->msi_bmp.bitmap) ? 0 : -ENODEV;
}
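
/*
 * Allocate and configure one MSI per descriptor on @pdev: reserve a
 * hardware IRQ from the PHB bitmap, map it to a Linux interrupt, let
 * the PHB backend build the MSI message, then write it to the device.
 */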
static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
        struct pci_controller *hose = pci_bus_to_host(pdev->bus);
        struct pnv_phb *phb = hose->private_data;
        struct msi_desc *entry;
        struct msi_msg msg;
        int hwirq;
        unsigned int virq;
        int rc;

        if (WARN_ON(!phb))
                return -ENODEV;

        list_for_each_entry(entry, &pdev->msi_list, list) {
                if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
                        pr_warn("%s: Supports only 64-bit MSIs\n",
                                pci_name(pdev));
                        return -ENXIO;
                }
                hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, 1);
                if (hwirq < 0) {
                        pr_warn("%s: Failed to find a free MSI\n",
                                pci_name(pdev));
                        return -ENOSPC;
                }
                virq = irq_create_mapping(NULL, phb->msi_base + hwirq);
                if (virq == NO_IRQ) {
                        pr_warn("%s: Failed to map MSI to linux irq\n",
                                pci_name(pdev));
                        msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
                        return -ENOMEM;
                }
                rc = phb->msi_setup(phb, pdev, phb->msi_base + hwirq,
                                    virq, entry->msi_attrib.is_64, &msg);
                if (rc) {
                        pr_warn("%s: Failed to setup MSI\n", pci_name(pdev));
                        irq_dispose_mapping(virq);
                        msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
                        return rc;
                }
                irq_set_msi_desc(virq, entry);
                write_msi_msg(virq, &msg);
        }
        return 0;
}

static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
{
        struct pci_controller *hose = pci_bus_to_host(pdev->bus);
        struct pnv_phb *phb = hose->private_data;
        struct msi_desc *entry;

        if (WARN_ON(!phb))
                return;

        list_for_each_entry(entry, &pdev->msi_list, list) {
                if (entry->irq == NO_IRQ)
                        continue;
                irq_set_msi_desc(entry->irq, NULL);
                msi_bitmap_free_hwirqs(&phb->msi_bmp,
                        virq_to_hw(entry->irq) - phb->msi_base, 1);
                irq_dispose_mapping(entry->irq);
        }
}
#endif /* CONFIG_PCI_MSI */
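
/*
 * Dump the P7IOC PHB diagnostic data that was fetched from OPAL into
 * phb->diag.  PEST entries are only printed when either PESTA or PESTB
 * has its valid (MSB) bit set.
 */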
static void pnv_pci_dump_p7ioc_diag_data(struct pnv_phb *phb)
{
        struct OpalIoP7IOCPhbErrorData *data = &phb->diag.p7ioc;
        int i;

        pr_info("PHB %d diagnostic data:\n", phb->hose->global_number);

        pr_info(" brdgCtl = 0x%08x\n", data->brdgCtl);

        pr_info(" portStatusReg = 0x%08x\n", data->portStatusReg);
        pr_info(" rootCmplxStatus = 0x%08x\n", data->rootCmplxStatus);
        pr_info(" busAgentStatus = 0x%08x\n", data->busAgentStatus);

        pr_info(" deviceStatus = 0x%08x\n", data->deviceStatus);
        pr_info(" slotStatus = 0x%08x\n", data->slotStatus);
        pr_info(" linkStatus = 0x%08x\n", data->linkStatus);
        pr_info(" devCmdStatus = 0x%08x\n", data->devCmdStatus);
        pr_info(" devSecStatus = 0x%08x\n", data->devSecStatus);

        pr_info(" rootErrorStatus = 0x%08x\n", data->rootErrorStatus);
        pr_info(" uncorrErrorStatus = 0x%08x\n", data->uncorrErrorStatus);
        pr_info(" corrErrorStatus = 0x%08x\n", data->corrErrorStatus);
        pr_info(" tlpHdr1 = 0x%08x\n", data->tlpHdr1);
        pr_info(" tlpHdr2 = 0x%08x\n", data->tlpHdr2);
        pr_info(" tlpHdr3 = 0x%08x\n", data->tlpHdr3);
        pr_info(" tlpHdr4 = 0x%08x\n", data->tlpHdr4);
        pr_info(" sourceId = 0x%08x\n", data->sourceId);

        pr_info(" errorClass = 0x%016llx\n", data->errorClass);
        pr_info(" correlator = 0x%016llx\n", data->correlator);

        pr_info(" p7iocPlssr = 0x%016llx\n", data->p7iocPlssr);
        pr_info(" p7iocCsr = 0x%016llx\n", data->p7iocCsr);
        pr_info(" lemFir = 0x%016llx\n", data->lemFir);
        pr_info(" lemErrorMask = 0x%016llx\n", data->lemErrorMask);
        pr_info(" lemWOF = 0x%016llx\n", data->lemWOF);
        pr_info(" phbErrorStatus = 0x%016llx\n", data->phbErrorStatus);
        pr_info(" phbFirstErrorStatus = 0x%016llx\n", data->phbFirstErrorStatus);
        pr_info(" phbErrorLog0 = 0x%016llx\n", data->phbErrorLog0);
        pr_info(" phbErrorLog1 = 0x%016llx\n", data->phbErrorLog1);
        pr_info(" mmioErrorStatus = 0x%016llx\n", data->mmioErrorStatus);
        pr_info(" mmioFirstErrorStatus = 0x%016llx\n", data->mmioFirstErrorStatus);
        pr_info(" mmioErrorLog0 = 0x%016llx\n", data->mmioErrorLog0);
        pr_info(" mmioErrorLog1 = 0x%016llx\n", data->mmioErrorLog1);
        pr_info(" dma0ErrorStatus = 0x%016llx\n", data->dma0ErrorStatus);
        pr_info(" dma0FirstErrorStatus = 0x%016llx\n", data->dma0FirstErrorStatus);
        pr_info(" dma0ErrorLog0 = 0x%016llx\n", data->dma0ErrorLog0);
        pr_info(" dma0ErrorLog1 = 0x%016llx\n", data->dma0ErrorLog1);
        pr_info(" dma1ErrorStatus = 0x%016llx\n", data->dma1ErrorStatus);
        pr_info(" dma1FirstErrorStatus = 0x%016llx\n", data->dma1FirstErrorStatus);
        pr_info(" dma1ErrorLog0 = 0x%016llx\n", data->dma1ErrorLog0);
        pr_info(" dma1ErrorLog1 = 0x%016llx\n", data->dma1ErrorLog1);

        for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
                if ((data->pestA[i] >> 63) == 0 &&
                    (data->pestB[i] >> 63) == 0)
                        continue;
                pr_info(" PE[%3d] PESTA = 0x%016llx\n", i, data->pestA[i]);
                pr_info("         PESTB = 0x%016llx\n", data->pestB[i]);
        }
}

static void pnv_pci_dump_phb_diag_data(struct pnv_phb *phb)
{
        switch(phb->model) {
        case PNV_PHB_MODEL_P7IOC:
                pnv_pci_dump_p7ioc_diag_data(phb);
                break;
        default:
                pr_warning("PCI %d: Can't decode this PHB diag data\n",
                           phb->hose->global_number);
        }
}
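
/*
 * Handle a config-space access error: fetch the PHB diagnostic buffer
 * from OPAL and clear the frozen state of the PE.  The diag data is
 * only dumped when the freeze cannot be cleared, to avoid flooding the
 * log while probing empty slots.
 */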
static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
{
        unsigned long flags, rc;
        int has_diag;

        spin_lock_irqsave(&phb->lock, flags);

        rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
                                         PNV_PCI_DIAG_BUF_SIZE);
        has_diag = (rc == OPAL_SUCCESS);

        rc = opal_pci_eeh_freeze_clear(phb->opal_id, pe_no,
                                       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
        if (rc) {
                pr_warning("PCI %d: Failed to clear EEH freeze state"
                           " for PE#%d, err %ld\n",
                           phb->hose->global_number, pe_no, rc);

                /* For now, let's only display the diag buffer when we fail to clear
                 * the EEH status. We'll do more sensible things later when we have
                 * proper EEH support. We need to make sure we don't pollute ourselves
                 * with the normal errors generated when probing empty slots
                 */
                if (has_diag)
                        pnv_pci_dump_phb_diag_data(phb);
                else
                        pr_warning("PCI %d: No diag data available\n",
                                   phb->hose->global_number);
        }

        spin_unlock_irqrestore(&phb->lock, flags);
}
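
/*
 * Poll the PE freeze state after a config cycle.  During early probe
 * the PE number may not be assigned yet, in which case errors are
 * reported against PE#0 (P5IOC2) or the reserved PE (IODA).
 */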
static void pnv_pci_config_check_eeh(struct pnv_phb *phb,
                                     struct device_node *dn)
{
        s64 rc;
        u8 fstate;
        u16 pcierr;
        u32 pe_no;

        /*
         * Get the PE#. During the PCI probe stage, we might not
         * setup that yet. So all ER errors should be mapped to
         * PE#0.
         */
        pe_no = PCI_DN(dn)->pe_number;
        if (pe_no == IODA_INVALID_PE) {
                if (phb->type == PNV_PHB_P5IOC2)
                        pe_no = 0;
                else
                        pe_no = phb->ioda.reserved_pe;
        }

        /* Read freeze status */
        rc = opal_pci_eeh_freeze_status(phb->opal_id, pe_no, &fstate, &pcierr,
                                        NULL);
        if (rc) {
                pr_warning("%s: Can't read EEH status (PE#%d) for "
                           "%s, err %lld\n",
                           __func__, pe_no, dn->full_name, rc);
                return;
        }
        cfg_dbg(" -> EEH check, bdfn=%04x PE#%d fstate=%x\n",
                (PCI_DN(dn)->busno << 8) | (PCI_DN(dn)->devfn),
                pe_no, fstate);
        if (fstate != 0)
                pnv_pci_handle_eeh_config(phb, pe_no);
}
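
/*
 * Config space accessors.  All accesses go through OPAL; a failed read
 * returns all-ones, and the EEH state of the PE is checked afterwards
 * so that a frozen PE is detected as early as possible.
 */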
int pnv_pci_cfg_read(struct device_node *dn,
                     int where, int size, u32 *val)
{
        struct pci_dn *pdn = PCI_DN(dn);
        struct pnv_phb *phb = pdn->phb->private_data;
        u32 bdfn = (pdn->busno << 8) | pdn->devfn;
#ifdef CONFIG_EEH
        struct eeh_pe *phb_pe = NULL;
#endif
        s64 rc;

        switch (size) {
        case 1: {
                u8 v8;
                rc = opal_pci_config_read_byte(phb->opal_id, bdfn, where, &v8);
                *val = (rc == OPAL_SUCCESS) ? v8 : 0xff;
                break;
        }
        case 2: {
                __be16 v16;
                rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where,
                                                    &v16);
                *val = (rc == OPAL_SUCCESS) ? be16_to_cpu(v16) : 0xffff;
                break;
        }
        case 4: {
                __be32 v32;
                rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32);
                *val = (rc == OPAL_SUCCESS) ? be32_to_cpu(v32) : 0xffffffff;
                break;
        }
        default:
                return PCIBIOS_FUNC_NOT_SUPPORTED;
        }
        cfg_dbg("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
                __func__, pdn->busno, pdn->devfn, where, size, *val);

#ifdef CONFIG_EEH
        /*
         * Check if the specified PE has been put into frozen
         * state. On the other hand, we needn't do that while
         * the PHB has been put into frozen state because of
         * PHB-fatal errors.
         */
        phb_pe = eeh_phb_pe_get(pdn->phb);
        if (phb_pe && (phb_pe->state & EEH_PE_ISOLATED))
                return PCIBIOS_SUCCESSFUL;

        if (phb->eeh_state & PNV_EEH_STATE_ENABLED) {
                if (*val == EEH_IO_ERROR_VALUE(size) &&
                    eeh_dev_check_failure(of_node_to_eeh_dev(dn)))
                        return PCIBIOS_DEVICE_NOT_FOUND;
        } else {
                pnv_pci_config_check_eeh(phb, dn);
        }
#else
        pnv_pci_config_check_eeh(phb, dn);
#endif /* CONFIG_EEH */

        return PCIBIOS_SUCCESSFUL;
}

int pnv_pci_cfg_write(struct device_node *dn,
                      int where, int size, u32 val)
{
        struct pci_dn *pdn = PCI_DN(dn);
        struct pnv_phb *phb = pdn->phb->private_data;
        u32 bdfn = (pdn->busno << 8) | pdn->devfn;

        cfg_dbg("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
                __func__, pdn->busno, pdn->devfn, where, size, val);
        switch (size) {
        case 1:
                opal_pci_config_write_byte(phb->opal_id, bdfn, where, val);
                break;
        case 2:
                opal_pci_config_write_half_word(phb->opal_id, bdfn, where, val);
                break;
        case 4:
                opal_pci_config_write_word(phb->opal_id, bdfn, where, val);
                break;
        default:
                return PCIBIOS_FUNC_NOT_SUPPORTED;
        }

        /* Check if the PHB got frozen due to an error (no response) */
#ifdef CONFIG_EEH
        if (!(phb->eeh_state & PNV_EEH_STATE_ENABLED))
                pnv_pci_config_check_eeh(phb, dn);
#else
        pnv_pci_config_check_eeh(phb, dn);
#endif /* CONFIG_EEH */

        return PCIBIOS_SUCCESSFUL;
}
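
/*
 * Bus-level config accessors: walk the children of the bus OF node to
 * find the device with a matching devfn and forward to the per-device
 * accessors above.  These are hooked up through pnv_pci_ops below.
 */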
static int pnv_pci_read_config(struct pci_bus *bus,
                               unsigned int devfn,
                               int where, int size, u32 *val)
{
        struct device_node *dn, *busdn = pci_bus_to_OF_node(bus);
        struct pci_dn *pdn;

        for (dn = busdn->child; dn; dn = dn->sibling) {
                pdn = PCI_DN(dn);
                if (pdn && pdn->devfn == devfn)
                        return pnv_pci_cfg_read(dn, where, size, val);
        }

        return PCIBIOS_DEVICE_NOT_FOUND;
}

static int pnv_pci_write_config(struct pci_bus *bus,
                                unsigned int devfn,
                                int where, int size, u32 val)
{
        struct device_node *dn, *busdn = pci_bus_to_OF_node(bus);
        struct pci_dn *pdn;

        for (dn = busdn->child; dn; dn = dn->sibling) {
                pdn = PCI_DN(dn);
                if (pdn && pdn->devfn == devfn)
                        return pnv_pci_cfg_write(dn, where, size, val);
        }

        return PCIBIOS_DEVICE_NOT_FOUND;
}

struct pci_ops pnv_pci_ops = {
        .read  = pnv_pci_read_config,
        .write = pnv_pci_write_config,
};
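
/*
 * TCE (DMA translation) helpers.  A TCE carries the real page number
 * plus read/write permission bits.  The "rm" argument selects the
 * real-mode flavour of the invalidation, used by the _rm variants that
 * are wired to ppc_md.tce_build_rm/tce_free_rm below.
 */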
static int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
                         unsigned long uaddr, enum dma_data_direction direction,
                         struct dma_attrs *attrs, bool rm)
{
        u64 proto_tce;
        __be64 *tcep, *tces;
        u64 rpn;

        proto_tce = TCE_PCI_READ; // Read allowed

        if (direction != DMA_TO_DEVICE)
                proto_tce |= TCE_PCI_WRITE;

        tces = tcep = ((__be64 *)tbl->it_base) + index - tbl->it_offset;
        rpn = __pa(uaddr) >> TCE_SHIFT;

        while (npages--)
                *(tcep++) = cpu_to_be64(proto_tce | (rpn++ << TCE_RPN_SHIFT));

        /* Some implementations won't cache invalid TCEs and thus may not
         * need that flush. We'll probably turn it_type into a bit mask
         * of flags if that becomes the case
         */
        if (tbl->it_type & TCE_PCI_SWINV_CREATE)
                pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1, rm);

        return 0;
}

static int pnv_tce_build_vm(struct iommu_table *tbl, long index, long npages,
                            unsigned long uaddr,
                            enum dma_data_direction direction,
                            struct dma_attrs *attrs)
{
        return pnv_tce_build(tbl, index, npages, uaddr, direction, attrs,
                        false);
}

static void pnv_tce_free(struct iommu_table *tbl, long index, long npages,
                         bool rm)
{
        __be64 *tcep, *tces;

        tces = tcep = ((__be64 *)tbl->it_base) + index - tbl->it_offset;

        while (npages--)
                *(tcep++) = cpu_to_be64(0);

        if (tbl->it_type & TCE_PCI_SWINV_FREE)
                pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1, rm);
}

static void pnv_tce_free_vm(struct iommu_table *tbl, long index, long npages)
{
        pnv_tce_free(tbl, index, npages, false);
}

static unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
{
        return ((u64 *)tbl->it_base)[index - tbl->it_offset];
}

static int pnv_tce_build_rm(struct iommu_table *tbl, long index, long npages,
                            unsigned long uaddr,
                            enum dma_data_direction direction,
                            struct dma_attrs *attrs)
{
        return pnv_tce_build(tbl, index, npages, uaddr, direction, attrs, true);
}

static void pnv_tce_free_rm(struct iommu_table *tbl, long index, long npages)
{
        pnv_tce_free(tbl, index, npages, true);
}
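
/*
 * Initialise an iommu_table covering @tce_size bytes of TCEs at
 * @tce_mem, for the DMA window starting at @dma_offset.  Each TCE is
 * 8 bytes, hence it_size = tce_size >> 3.
 */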
void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
                               void *tce_mem, u64 tce_size,
                               u64 dma_offset)
{
        tbl->it_blocksize = 16;
        tbl->it_base = (unsigned long)tce_mem;
        tbl->it_offset = dma_offset >> IOMMU_PAGE_SHIFT;
        tbl->it_index = 0;
        tbl->it_size = tce_size >> 3;
        tbl->it_busno = 0;
        tbl->it_type = TCE_PCI;
}
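
/*
 * Fallback TCE table setup based on the "linux,tce-*" device-tree
 * properties (the BML way), used when no PHB-specific DMA setup is
 * available.
 */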
static struct iommu_table *pnv_pci_setup_bml_iommu(struct pci_controller *hose)
{
        struct iommu_table *tbl;
        const __be64 *basep, *swinvp;
        const __be32 *sizep;

        basep = of_get_property(hose->dn, "linux,tce-base", NULL);
        sizep = of_get_property(hose->dn, "linux,tce-size", NULL);
        if (basep == NULL || sizep == NULL) {
                pr_err("PCI: %s has missing tce entries !\n",
                       hose->dn->full_name);
                return NULL;
        }
        tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, hose->node);
        if (WARN_ON(!tbl))
                return NULL;
        pnv_pci_setup_iommu_table(tbl, __va(be64_to_cpup(basep)),
                                  be32_to_cpup(sizep), 0);
        iommu_init_table(tbl, hose->node);
        iommu_register_group(tbl, pci_domain_nr(hose->bus), 0);

        /* Deal with SW invalidated TCEs when needed (BML way) */
        swinvp = of_get_property(hose->dn, "linux,tce-sw-invalidate-info",
                                 NULL);
        if (swinvp) {
                tbl->it_busno = be64_to_cpu(swinvp[1]);
                tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8);
                tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
        }
        return tbl;
}

static void pnv_pci_dma_fallback_setup(struct pci_controller *hose,
                                       struct pci_dev *pdev)
{
        struct device_node *np = pci_bus_to_OF_node(hose->bus);
        struct pci_dn *pdn;

        if (np == NULL)
                return;
        pdn = PCI_DN(np);
        if (!pdn->iommu_table)
                pdn->iommu_table = pnv_pci_setup_bml_iommu(hose);
        if (!pdn->iommu_table)
                return;
        set_iommu_table_base_and_group(&pdev->dev, pdn->iommu_table);
}

static void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
{
        struct pci_controller *hose = pci_bus_to_host(pdev->bus);
        struct pnv_phb *phb = hose->private_data;

        /* If we have no phb structure, try to setup a fallback based on
         * the device-tree (RTAS PCI for example)
         */
        if (phb && phb->dma_dev_setup)
                phb->dma_dev_setup(phb, pdev);
        else
                pnv_pci_dma_fallback_setup(hose, pdev);
}

void pnv_pci_shutdown(void)
{
        struct pci_controller *hose;

        list_for_each_entry(hose, &hose_list, list_node) {
                struct pnv_phb *phb = hose->private_data;

                if (phb && phb->shutdown)
                        phb->shutdown(phb);
        }
}

/* Fixup wrong class code in p7ioc and p8 root complex */
static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
{
        dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk);
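
/*
 * Delay bus probing until PCI_RESET_DELAY_US has elapsed since firmware
 * lifted the bus reset, based on the "reset-clear-timestamp" property.
 */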
static int pnv_pci_probe_mode(struct pci_bus *bus)
{
        struct pci_controller *hose = pci_bus_to_host(bus);
        const __be64 *tstamp;
        u64 now, target;

        /* We hijack this as a way to ensure we have waited long
         * enough since the reset was lifted on the PCI bus
         */
        if (bus != hose->bus)
                return PCI_PROBE_NORMAL;
        tstamp = of_get_property(hose->dn, "reset-clear-timestamp", NULL);
        if (!tstamp || !*tstamp)
                return PCI_PROBE_NORMAL;

        now = mftb() / tb_ticks_per_usec;
        target = (be64_to_cpup(tstamp) / tb_ticks_per_usec)
                + PCI_RESET_DELAY_US;

        pr_devel("pci %04d: Reset target: 0x%llx now: 0x%llx\n",
                 hose->global_number, target, now);

        if (now < target)
                msleep((target - now + 999) / 1000);

        return PCI_PROBE_NORMAL;
}
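
/*
 * Top-level platform init: discover the IO hubs / PHBs from the device
 * tree (IODA, p5ioc2 or IODA2 PHB3), then hook the DMA, probe-mode and
 * MSI callbacks into ppc_md.
 */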
void __init pnv_pci_init(void)
{
        struct device_node *np;

        pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN);

        /* OPAL absent, try POPAL first then RTAS detection of PHBs */
        if (!firmware_has_feature(FW_FEATURE_OPAL)) {
#ifdef CONFIG_PPC_POWERNV_RTAS
                init_pci_config_tokens();
                find_and_init_phbs();
#endif /* CONFIG_PPC_POWERNV_RTAS */
        } else {
                /* OPAL is here, do our normal stuff */
                int found_ioda = 0;

                /* Look for IODA IO-Hubs. We don't support mixing IODA
                 * and p5ioc2 due to the need to change some global
                 * probing flags
                 */
                for_each_compatible_node(np, NULL, "ibm,ioda-hub") {
                        pnv_pci_init_ioda_hub(np);
                        found_ioda = 1;
                }

                /* Look for p5ioc2 IO-Hubs */
                if (!found_ioda)
                        for_each_compatible_node(np, NULL, "ibm,p5ioc2")
                                pnv_pci_init_p5ioc2_hub(np);

                /* Look for ioda2 built-in PHB3's */
                for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
                        pnv_pci_init_ioda2_phb(np);
        }

        /* Setup the linkage between OF nodes and PHBs */
        pci_devs_phb_init();

        /* Configure IOMMU DMA hooks */
        ppc_md.pci_dma_dev_setup = pnv_pci_dma_dev_setup;
        ppc_md.tce_build = pnv_tce_build_vm;
        ppc_md.tce_free = pnv_tce_free_vm;
        ppc_md.tce_build_rm = pnv_tce_build_rm;
        ppc_md.tce_free_rm = pnv_tce_free_rm;
        ppc_md.tce_get = pnv_tce_get;
        ppc_md.pci_probe_mode = pnv_pci_probe_mode;
        set_pci_dma_ops(&dma_iommu_ops);

        /* Configure MSIs */
#ifdef CONFIG_PCI_MSI
        ppc_md.msi_check_device = pnv_msi_check_device;
        ppc_md.setup_msi_irqs = pnv_setup_msi_irqs;
        ppc_md.teardown_msi_irqs = pnv_teardown_msi_irqs;
#endif
}
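
/*
 * Keep the IOMMU group membership of PCI devices in sync as they are
 * added to or removed from the bus.
 */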
static int tce_iommu_bus_notifier(struct notifier_block *nb,
                                  unsigned long action, void *data)
{
        struct device *dev = data;

        switch (action) {
        case BUS_NOTIFY_ADD_DEVICE:
                return iommu_add_device(dev);
        case BUS_NOTIFY_DEL_DEVICE:
                if (dev->iommu_group)
                        iommu_del_device(dev);
                return 0;
        default:
                return 0;
        }
}

static struct notifier_block tce_iommu_bus_nb = {
        .notifier_call = tce_iommu_bus_notifier,
};

static int __init tce_iommu_bus_notifier_init(void)
{
        BUILD_BUG_ON(PAGE_SIZE < IOMMU_PAGE_SIZE);

        bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
        return 0;
}
subsys_initcall_sync(tce_iommu_bus_notifier_init);