/*
 * Support PCI/PCIe on PowerNV platforms
 *
 * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/msi.h>

#include <asm/sections.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
#include <asm/iommu.h>
#define define_pe_printk_level(func, kern_level)			\
static int func(const struct pnv_ioda_pe *pe, const char *fmt, ...)	\
	struct va_format vaf;						\
	va_start(args, fmt);						\
		strlcpy(pfix, dev_name(&pe->pdev->dev),			\
		sprintf(pfix, "%04x:%02x ",				\
			pci_domain_nr(pe->pbus),			\
	r = printk(kern_level "pci %s: [PE# %.3d] %pV",			\
		   pfix, pe->pe_number, &vaf);				\

define_pe_printk_level(pe_err, KERN_ERR);
define_pe_printk_level(pe_warn, KERN_WARNING);
define_pe_printk_level(pe_info, KERN_INFO);
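/*
 * Allocate a PE number from the PHB's PE allocation bitmap. Returns
 * IODA_INVALID_PE when no free PE# is left.
 */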
static int pnv_ioda_alloc_pe(struct pnv_phb *phb)
{
	unsigned long pe;

	do {
		pe = find_next_zero_bit(phb->ioda.pe_alloc,
					phb->ioda.total_pe, 0);
		if (pe >= phb->ioda.total_pe)
			return IODA_INVALID_PE;
	} while (test_and_set_bit(pe, phb->ioda.pe_alloc));

	phb->ioda.pe_array[pe].phb = phb;
	phb->ioda.pe_array[pe].pe_number = pe;
	return pe;
}

static void pnv_ioda_free_pe(struct pnv_phb *phb, int pe)
{
	WARN_ON(phb->ioda.pe_array[pe].pdev);

	memset(&phb->ioda.pe_array[pe], 0, sizeof(struct pnv_ioda_pe));
	clear_bit(pe, phb->ioda.pe_alloc);
}
/* Currently those 2 are only used when MSIs are enabled, this will change
 * but in the meantime, we need to protect them to avoid warnings
 */
#ifdef CONFIG_PCI_MSI
static struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(dev);

	if (!pdn)
		return NULL;
	if (pdn->pe_number == IODA_INVALID_PE)
		return NULL;
	return &phb->ioda.pe_array[pdn->pe_number];
}
#endif /* CONFIG_PCI_MSI */
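/*
 * Tell OPAL which RID range (bus/device/function comparison masks)
 * belongs to this PE, add it to the parent bridges' PELT-V, populate
 * the reverse RID -> PE# map and, on IODA1, bind an MVE for MSIs.
 */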
static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
	struct pci_dev *parent;
	uint8_t bcomp, dcomp, fcomp;
	long rc, rid_end, rid;

	/* Bus validation ? */
		dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
		fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
		parent = pe->pbus->self;
		if (pe->flags & PNV_IODA_PE_BUS_ALL)
			count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
		case 1:  bcomp = OpalPciBusAll;   break;
		case 2:  bcomp = OpalPciBus7Bits; break;
		case 4:  bcomp = OpalPciBus6Bits; break;
		case 8:  bcomp = OpalPciBus5Bits; break;
		case 16: bcomp = OpalPciBus4Bits; break;
		case 32: bcomp = OpalPciBus3Bits; break;
			pr_err("%s: Number of subordinate busses %d"
			       pci_name(pe->pbus->self), count);
			/* Do an exact match only */
			bcomp = OpalPciBusAll;
		rid_end = pe->rid + (count << 8);
		parent = pe->pdev->bus->self;
		bcomp = OpalPciBusAll;
		dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
		fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
		rid_end = pe->rid + 1;
	/* Associate PE in PELT */
	rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
			     bcomp, dcomp, fcomp, OPAL_MAP_PE);
		pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
	opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
				  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);

	/* Add to all parents' PELT-V */
		struct pci_dn *pdn = pci_get_pdn(parent);
		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
			rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
						pe->pe_number, OPAL_ADD_PE_TO_DOMAIN);
			/* XXX What to do in case of error ? */
		parent = parent->bus->self;

	/* Set up the reverse map */
	for (rid = pe->rid; rid < rid_end; rid++)
		phb->ioda.pe_rmap[rid] = pe->pe_number;

	/* Set up one MVE on IODA1 */
	if (phb->type == PNV_PHB_IODA1) {
		pe->mve_number = pe->pe_number;
		rc = opal_pci_set_mve(phb->opal_id, pe->mve_number,
			pe_err(pe, "OPAL error %ld setting up MVE %d\n",
		rc = opal_pci_set_mve_enable(phb->opal_id,
					     pe->mve_number, OPAL_ENABLE_MVE);
			pe_err(pe, "OPAL error %ld enabling MVE %d\n",
	} else if (phb->type == PNV_PHB_IODA2)
static void pnv_ioda_link_pe_by_weight(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe)
{
	struct pnv_ioda_pe *lpe;

	list_for_each_entry(lpe, &phb->ioda.pe_dma_list, dma_link) {
		if (lpe->dma_weight < pe->dma_weight) {
			list_add_tail(&pe->dma_link, &lpe->dma_link);
			return;
		}
	}
	list_add_tail(&pe->dma_link, &phb->ioda.pe_dma_list);
}
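/*
 * Heuristic DMA weight of a device, used later to apportion the
 * available 32-bit TCE segments among the PEs on IODA1.
 */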
static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev)
	/* This is quite simplistic. The "base" weight of a device
	 * is 10. 0 means no DMA is to be accounted for it.
	 */

	/* If it's a bridge, no DMA */
	if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL)

	/* Reduce the weight of slow USB controllers */
	if (dev->class == PCI_CLASS_SERIAL_USB_UHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_OHCI ||
	    dev->class == PCI_CLASS_SERIAL_USB_EHCI)

	/* Increase the weight of RAID (includes Obsidian) */
	if ((dev->class >> 8) == PCI_CLASS_STORAGE_RAID)
static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct pci_dn *pdn = pci_get_pdn(dev);
	struct pnv_ioda_pe *pe;

		pr_err("%s: Device tree node not associated properly\n",
	if (pdn->pe_number != IODA_INVALID_PE)

	/* PE#0 has been pre-set */
	if (dev->bus->number == 0)

	pe_num = pnv_ioda_alloc_pe(phb);
	if (pe_num == IODA_INVALID_PE) {
		pr_warning("%s: Not enough PE# available, disabling device\n",

	/* NOTE: We get only one ref to the pci_dev for the pdn, not for the
	 * pointer in the PE data structure, both should be destroyed at the
	 * same time. However, this needs to be looked at more closely again
	 * once we actually start removing things (Hotplug, SR-IOV, ...)
	 *
	 * At some point we want to remove the PDN completely anyways
	 */
	pe = &phb->ioda.pe_array[pe_num];
	pdn->pe_number = pe_num;
	pe->rid = dev->bus->number << 8 | pdn->devfn;

	pe_info(pe, "Associated device to PE\n");

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		pnv_ioda_free_pe(phb, pe_num);
		pdn->pe_number = IODA_INVALID_PE;

	/* Assign a DMA weight to the device */
	pe->dma_weight = pnv_ioda_dma_weight(dev);
	if (pe->dma_weight != 0) {
		phb->ioda.dma_weight += pe->dma_weight;
		phb->ioda.dma_pe_count++;

	pnv_ioda_link_pe_by_weight(phb, pe);

#endif /* Useful for SRIOV case */
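/*
 * Assign every device on @bus (and, for "bus all" PEs, on its
 * subordinate buses) to the given PE and accumulate their DMA weight.
 */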
static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
	list_for_each_entry(dev, &bus->devices, bus_list) {
		struct pci_dn *pdn = pci_get_pdn(dev);

			pr_warn("%s: No device node associated with device !\n",
		pdn->pe_number = pe->pe_number;
		pe->dma_weight += pnv_ioda_dma_weight(dev);
		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
			pnv_ioda_setup_same_PE(dev->subordinate, pe);
/*
 * There are 2 types of PCI-bus-sensitive PEs: one is comprised of a
 * single PCI bus; the other contains the primary PCI bus and its
 * subordinate PCI devices and buses. The second type of PE is normally
 * originated by a PCIe-to-PCI bridge or a PLX switch downstream port.
 */
static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, int all)
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	struct pnv_ioda_pe *pe;

	pe_num = pnv_ioda_alloc_pe(phb);
	if (pe_num == IODA_INVALID_PE) {
		pr_warning("%s: Not enough PE# available for PCI bus %04x:%02x\n",
			   __func__, pci_domain_nr(bus), bus->number);

	pe = &phb->ioda.pe_array[pe_num];
	pe->flags = (all ? PNV_IODA_PE_BUS_ALL : PNV_IODA_PE_BUS);
	pe->rid = bus->busn_res.start << 8;

		pe_info(pe, "Secondary bus %d..%d associated with PE#%d\n",
			bus->busn_res.start, bus->busn_res.end, pe_num);
		pe_info(pe, "Secondary bus %d associated with PE#%d\n",
			bus->busn_res.start, pe_num);

	if (pnv_ioda_configure_pe(phb, pe)) {
		/* XXX What do we do here ? */
		pnv_ioda_free_pe(phb, pe_num);

	/* Associate it with all child devices */
	pnv_ioda_setup_same_PE(bus, pe);

	/* Add the PE to the list */
	list_add_tail(&pe->list, &phb->ioda.pe_list);

	/* Account for one DMA PE if at least one DMA-capable device exists */
	if (pe->dma_weight != 0) {
		phb->ioda.dma_weight += pe->dma_weight;
		phb->ioda.dma_pe_count++;

	pnv_ioda_link_pe_by_weight(phb, pe);
static void pnv_ioda_setup_PEs(struct pci_bus *bus)
	pnv_ioda_setup_bus_PE(bus, 0);

	list_for_each_entry(dev, &bus->devices, bus_list) {
		if (dev->subordinate) {
			if (pci_pcie_type(dev) == PCI_EXP_TYPE_PCI_BRIDGE)
				pnv_ioda_setup_bus_PE(dev->subordinate, 1);
			else
				pnv_ioda_setup_PEs(dev->subordinate);
/*
 * Configure PEs so that the downstream PCI buses and devices
 * get their associated PE#. Unfortunately, we haven't figured
 * out a way to identify PLX bridges yet, so we simply put the
 * PCI bus and everything subordinate to the root port into a
 * PE# here. This rule is expected to change as soon as we can
 * detect PLX bridges correctly.
 */
static void pnv_pci_ioda_setup_PEs(void)
	struct pci_controller *hose, *tmp;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		pnv_ioda_setup_PEs(hose->bus);
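/* Point the device at the 32-bit TCE table of the PE it belongs to */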
static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev)
	struct pci_dn *pdn = pci_get_pdn(pdev);
	struct pnv_ioda_pe *pe;

	/*
	 * The function can be called before the PE#
	 * has been assigned. Do nothing in that case.
	 */
	if (!pdn || pdn->pe_number == IODA_INVALID_PE)
		return;

	pe = &phb->ioda.pe_array[pdn->pe_number];
	set_iommu_table_base(&pdev->dev, &pe->tce32_table);
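/*
 * Invalidate the hardware TCE cache for the TCE entries between
 * @startp and @endp by writing to the TCE kill register mapped at
 * tbl->it_index.
 */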
static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
					 u64 *startp, u64 *endp)
	u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
	unsigned long start, end, inc;

	start = __pa(startp);

	/* BML uses this case for p6/p7/galaxy2: Shift addr and put in node */
		start |= tbl->it_busno;
		end |= tbl->it_busno;
	} else if (tbl->it_type & TCE_PCI_SWINV_PAIR) {
		/* p7ioc-style invalidation, 2 TCEs per write */
		start |= (1ull << 63);
		/* Default (older HW) */

	end |= inc - 1;	/* round up end to be different than start */

	mb(); /* Ensure above stores are visible */
	while (start <= end) {
		__raw_writeq(start, invalidate);

	/*
	 * The iommu layer will do another mb() for us on build()
	 * and we don't care on free()
	 */
static void pnv_pci_ioda2_tce_invalidate(struct pnv_ioda_pe *pe,
					 struct iommu_table *tbl,
					 u64 *startp, u64 *endp)
	unsigned long start, end, inc;
	u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;

	/* We'll invalidate DMA address in PE scope */
	start |= (pe->pe_number & 0xFF);

	/* Figure out the start, end and step */
	inc = tbl->it_offset + (((u64)startp - tbl->it_base) / sizeof(u64));
	start |= (inc << 12);
	inc = tbl->it_offset + (((u64)endp - tbl->it_base) / sizeof(u64));

	while (start <= end) {
		__raw_writeq(start, invalidate);
void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
				 u64 *startp, u64 *endp)
{
	struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
					      tce32_table);
	struct pnv_phb *phb = pe->phb;

	if (phb->type == PNV_PHB_IODA1)
		pnv_pci_ioda1_tce_invalidate(tbl, startp, endp);
	else
		pnv_pci_ioda2_tce_invalidate(pe, tbl, startp, endp);
}
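/*
 * Create the 32-bit TCE table for a PE on IODA1: allocate the TCE
 * pages, map each 256MB segment through OPAL and register the
 * resulting iommu_table with the Linux IOMMU layer.
 */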
static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
				      struct pnv_ioda_pe *pe, unsigned int base,
	struct page *tce_mem = NULL;
	const __be64 *swinvp;
	struct iommu_table *tbl;

	/* 256M DMA window, 4K TCE pages, 8 bytes TCE */
#define TCE32_TABLE_SIZE ((0x10000000 / 0x1000) * 8)

	/* XXX FIXME: Handle 64-bit only DMA devices */
	/* XXX FIXME: Provide 64-bit DMA facilities & non-4K TCE tables etc.. */
	/* XXX FIXME: Allocate multi-level tables on PHB3 */

	/* We shouldn't already have a 32-bit DMA associated */
	if (WARN_ON(pe->tce32_seg >= 0))

	/* Grab a 32-bit TCE table */
	pe->tce32_seg = base;
	pe_info(pe, " Setting up 32-bit TCE table at %08x..%08x\n",
		(base << 28), ((base + segs) << 28) - 1);

	/* XXX Currently, we allocate one big contiguous table for the
	 * TCEs. We only really need one chunk per 256M of TCE space
	 * (ie per segment) but that's an optimization for later, it
	 * requires some added smarts with our get/put_tce implementation
	 */
	tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
				   get_order(TCE32_TABLE_SIZE * segs));
		pe_err(pe, " Failed to allocate a 32-bit TCE memory\n");
	addr = page_address(tce_mem);
	memset(addr, 0, TCE32_TABLE_SIZE * segs);

	for (i = 0; i < segs; i++) {
		rc = opal_pci_map_pe_dma_window(phb->opal_id,
						__pa(addr) + TCE32_TABLE_SIZE * i,
						TCE32_TABLE_SIZE, 0x1000);
			pe_err(pe, " Failed to configure 32-bit TCE table,"

	/* Setup linux iommu table */
	tbl = &pe->tce32_table;
	pnv_pci_setup_iommu_table(tbl, addr, TCE32_TABLE_SIZE * segs,

	/* OPAL variant of P7IOC SW invalidated TCEs */
	swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
		/* We need a couple more fields -- an address and a data
		 * to or. Since the bus is only printed out on table free
		 * errors, and on the first pass the data will be a relative
		 * bus number, print that out instead.
		 */
		tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8);
		tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE |

	iommu_init_table(tbl, phb->hose->node);

	/* XXX Failure: Try to fallback to 64-bit only ? */
	if (pe->tce32_seg >= 0)
	__free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs));
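/*
 * IODA2 (PHB3) variant: give the PE a single TCE table covering the
 * whole 32-bit DMA space below the M32 window and map it through the
 * TVT (the TVE index is the PE number shifted left by one).
 */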
static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
				       struct pnv_ioda_pe *pe)
	struct page *tce_mem = NULL;
	const __be64 *swinvp;
	struct iommu_table *tbl;
	unsigned int tce_table_size, end;

	/* We shouldn't already have a 32-bit DMA associated */
	if (WARN_ON(pe->tce32_seg >= 0))

	/* The PE will reserve all possible 32-bit space */
	end = (1 << ilog2(phb->ioda.m32_pci_base));
	tce_table_size = (end / 0x1000) * 8;
	pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n",

	/* Allocate TCE table */
	tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
				   get_order(tce_table_size));
		pe_err(pe, "Failed to allocate a 32-bit TCE memory\n");
	addr = page_address(tce_mem);
	memset(addr, 0, tce_table_size);

	/*
	 * Map TCE table through TVT. The TVE index is the PE number
	 * shifted by 1 bit for 32-bit DMA space.
	 */
	rc = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
					pe->pe_number << 1, 1, __pa(addr),
					tce_table_size, 0x1000);
		pe_err(pe, "Failed to configure 32-bit TCE table,"

	/* Setup linux iommu table */
	tbl = &pe->tce32_table;
	pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, 0);

	/* OPAL variant of PHB3 invalidated TCEs */
	swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
		/* We need a couple more fields -- an address and a data
		 * to or. Since the bus is only printed out on table free
		 * errors, and on the first pass the data will be a relative
		 * bus number, print that out instead.
		 */
		tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8);
		tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;

	iommu_init_table(tbl, phb->hose->node);

	if (pe->tce32_seg >= 0)
	__free_pages(tce_mem, get_order(tce_table_size));
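/*
 * Distribute the PHB's 32-bit TCE segments across its DMA-capable PEs,
 * proportionally to each PE's DMA weight on IODA1; on IODA2 every PE
 * simply gets the full 32-bit DMA space.
 */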
static void pnv_ioda_setup_dma(struct pnv_phb *phb)
	struct pci_controller *hose = phb->hose;
	unsigned int residual, remaining, segs, tw, base;
	struct pnv_ioda_pe *pe;

	/* If we have more PE# than segments available, hand out one
	 * per PE until we run out and let the rest fail. If not,
	 * then we assign at least one segment per PE, plus more based
	 * on the number of devices under that PE
	 */
	if (phb->ioda.dma_pe_count > phb->ioda.tce32_count)
		residual = 0;
	else
		residual = phb->ioda.tce32_count -
			   phb->ioda.dma_pe_count;

	pr_info("PCI: Domain %04x has %ld available 32-bit DMA segments\n",
		hose->global_number, phb->ioda.tce32_count);
	pr_info("PCI: %d PE# for a total weight of %d\n",
		phb->ioda.dma_pe_count, phb->ioda.dma_weight);

	/* Walk our PE list and configure their DMA segments, hand them
	 * out one base segment plus any residual segments based on
	 * their weight
	 */
	remaining = phb->ioda.tce32_count;
	tw = phb->ioda.dma_weight;
	list_for_each_entry(pe, &phb->ioda.pe_dma_list, dma_link) {
			pe_warn(pe, "No DMA32 resources available\n");
		segs += ((pe->dma_weight * residual) + (tw / 2)) / tw;
		if (segs > remaining)

		/*
		 * For an IODA2-compliant PHB3, we needn't care about the
		 * weight: all of the available 32-bit DMA space will be
		 * assigned to the PE.
		 */
		if (phb->type == PNV_PHB_IODA1) {
			pe_info(pe, "DMA weight %d, assigned %d DMA32 segments\n",
				pe->dma_weight, segs);
			pnv_pci_ioda_setup_dma_pe(phb, pe, base, segs);
			pe_info(pe, "Assign DMA32 space\n");
			pnv_pci_ioda2_setup_dma_pe(phb, pe);
#ifdef CONFIG_PCI_MSI
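/* On PHB3, MSIs have to be EOI'd through an additional OPAL call */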
static void pnv_ioda2_msi_eoi(struct irq_data *d)
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	struct irq_chip *chip = irq_data_get_irq_chip(d);
	struct pnv_phb *phb = container_of(chip, struct pnv_phb,

	rc = opal_pci_msi_eoi(phb->opal_id, hw_irq);
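/*
 * Wire one MSI: bind the XIVE to the device's PE, retrieve the MSI
 * address/data from OPAL and, on PHB3, switch the virq to the PHB's
 * own irq_chip so that EOIs go through pnv_ioda2_msi_eoi().
 */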
static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
				  unsigned int hwirq, unsigned int virq,
				  unsigned int is_64, struct msi_msg *msg)
	struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
	struct pci_dn *pdn = pci_get_pdn(dev);
	struct irq_data *idata;
	struct irq_chip *ichip;
	unsigned int xive_num = hwirq - phb->msi_base;
	uint32_t addr32, data;

	/* No PE assigned ? bail out ... no MSI for you ! */

	/* Check if we have an MVE */
	if (pe->mve_number < 0)

	/* Force 32-bit MSI on some broken devices */
	if (pdn && pdn->force_32bit_msi)

	/* Assign XIVE to PE */
	rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
		pr_warn("%s: OPAL error %d setting XIVE %d PE\n",
			pci_name(dev), rc, xive_num);

		rc = opal_get_msi_64(phb->opal_id, pe->mve_number, xive_num, 1,
			pr_warn("%s: OPAL error %d getting 64-bit MSI data\n",
		msg->address_hi = addr64 >> 32;
		msg->address_lo = addr64 & 0xfffffffful;
		rc = opal_get_msi_32(phb->opal_id, pe->mve_number, xive_num, 1,
			pr_warn("%s: OPAL error %d getting 32-bit MSI data\n",
		msg->address_lo = addr32;

	/*
	 * Change the IRQ chip for the MSI interrupts on PHB3.
	 * The corresponding IRQ chip is populated the first time
	 * this is called.
	 */
	if (phb->type == PNV_PHB_IODA2) {
		if (!phb->ioda.irq_chip_init) {
			idata = irq_get_irq_data(virq);
			ichip = irq_data_get_irq_chip(idata);
			phb->ioda.irq_chip_init = 1;
			phb->ioda.irq_chip = *ichip;
			phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi;

		irq_set_chip(virq, &phb->ioda.irq_chip);

	pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d),"
		 " address=%x_%08x data=%x PE# %d\n",
		 pci_name(dev), is_64 ? "64" : "32", hwirq, xive_num,
		 msg->address_hi, msg->address_lo, data, pe->pe_number);
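/*
 * Parse the "ibm,opal-msi-ranges" property (with a fallback to
 * "msi-ranges"), allocate the MSI bitmap and hook up the IODA MSI
 * setup routine for this PHB.
 */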
static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
	const __be32 *prop = of_get_property(phb->hose->dn,
					     "ibm,opal-msi-ranges", NULL);
		prop = of_get_property(phb->hose->dn, "msi-ranges", NULL);

	phb->msi_base = be32_to_cpup(prop);
	count = be32_to_cpup(prop + 1);
	if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) {
		pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
		       phb->hose->global_number);

	phb->msi_setup = pnv_pci_ioda_msi_setup;
	phb->msi32_support = 1;
	pr_info(" Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
		count, phb->msi_base);
#else
static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { }
#endif /* CONFIG_PCI_MSI */
/*
 * This function is supposed to be called on a per-PE basis, from top
 * to bottom, so that the I/O or MMIO segments assigned to a parent PE
 * can be overridden by its child PEs if necessary.
 */
static void pnv_ioda_setup_pe_seg(struct pci_controller *hose,
				  struct pnv_ioda_pe *pe)
	struct pnv_phb *phb = hose->private_data;
	struct pci_bus_region region;
	struct resource *res;

	/*
	 * NOTE: We only care about PCI-bus-based PEs for now. PCI-device-based
	 * PEs, for example SR-IOV sensitive VFs, will be figured out later.
	 */
	BUG_ON(!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)));
	pci_bus_for_each_resource(pe->pbus, res, i) {
		if (!res || !res->flags ||
		    res->start > res->end)

		if (res->flags & IORESOURCE_IO) {
			region.start = res->start - phb->ioda.io_pci_base;
			region.end = res->end - phb->ioda.io_pci_base;
			index = region.start / phb->ioda.io_segsize;

			while (index < phb->ioda.total_pe &&
			       region.start <= region.end) {
				phb->ioda.io_segmap[index] = pe->pe_number;
				rc = opal_pci_map_pe_mmio_window(phb->opal_id,
					pe->pe_number, OPAL_IO_WINDOW_TYPE, 0, index);
				if (rc != OPAL_SUCCESS) {
					pr_err("%s: OPAL error %d when mapping IO "
					       "segment #%d to PE#%d\n",
					       __func__, rc, index, pe->pe_number);

				region.start += phb->ioda.io_segsize;
		} else if (res->flags & IORESOURCE_MEM) {
			/* WARNING: Assumes M32 is mem region 0 in PHB. We need to
			 * harden that algorithm when we start supporting M64
			 */
			region.start = res->start -
				       hose->mem_offset[0] -
				       phb->ioda.m32_pci_base;
			region.end = res->end -
				     hose->mem_offset[0] -
				     phb->ioda.m32_pci_base;
			index = region.start / phb->ioda.m32_segsize;

			while (index < phb->ioda.total_pe &&
			       region.start <= region.end) {
				phb->ioda.m32_segmap[index] = pe->pe_number;
				rc = opal_pci_map_pe_mmio_window(phb->opal_id,
					pe->pe_number, OPAL_M32_WINDOW_TYPE, 0, index);
				if (rc != OPAL_SUCCESS) {
					pr_err("%s: OPAL error %d when mapping M32 "
					       "segment #%d to PE#%d\n",
					       __func__, rc, index, pe->pe_number);

				region.start += phb->ioda.m32_segsize;
static void pnv_pci_ioda_setup_seg(void)
	struct pci_controller *tmp, *hose;
	struct pnv_ioda_pe *pe;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		phb = hose->private_data;
		list_for_each_entry(pe, &phb->ioda.pe_list, list) {
			pnv_ioda_setup_pe_seg(hose, pe);

static void pnv_pci_ioda_setup_DMA(void)
	struct pci_controller *hose, *tmp;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		pnv_ioda_setup_dma(hose->private_data);

		/* Mark the PHB initialization done */
		phb = hose->private_data;
		phb->initialized = 1;
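/*
 * Late pcibios fixup: once resource assignment is done, carve the
 * buses into PEs, program the I/O and M32 segment maps and set up the
 * DMA (TCE) tables for every PHB.
 */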
static void pnv_pci_ioda_fixup(void)
{
	pnv_pci_ioda_setup_PEs();
	pnv_pci_ioda_setup_seg();
	pnv_pci_ioda_setup_DMA();
}
/*
 * Returns the alignment for I/O or memory windows for P2P
 * bridges. That actually depends on how PEs are segmented.
 * For now, we return the I/O or M32 segment size for PE-sensitive
 * P2P bridges. Otherwise, the default values (4KiB for I/O,
 * 1MiB for memory) will be returned.
 *
 * The current PCI bus might be put into one PE, which was
 * created against the parent PCI bridge. In that case, we
 * needn't enlarge the alignment so that we can save some
 * resources.
 */
static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
	struct pci_dev *bridge;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pnv_phb *phb = hose->private_data;
	int num_pci_bridges = 0;

		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_PCI_BRIDGE) {
			if (num_pci_bridges >= 2)

		bridge = bridge->bus->self;

	/* We need to support prefetchable memory windows later */
	if (type & IORESOURCE_MEM)
		return phb->ioda.m32_segsize;

	return phb->ioda.io_segsize;
/* Prevent enabling devices for which we couldn't properly
 * assign a PE
 */
static int pnv_pci_enable_device_hook(struct pci_dev *dev)
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	struct pnv_phb *phb = hose->private_data;

	/* The function is probably called while the PEs have
	 * not been created yet, for example during resource
	 * reassignment in the PCI probe period. We just skip
	 * the check in that case.
	 */
	if (!phb->initialized)

	pdn = pci_get_pdn(dev);
	if (!pdn || pdn->pe_number == IODA_INVALID_PE)

static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus,
	return phb->ioda.pe_rmap[(bus->number << 8) | devfn];
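/* Reset the IODA tables on shutdown (used for kexec) */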
static void pnv_pci_ioda_shutdown(struct pnv_phb *phb)
{
	opal_pci_reset(phb->opal_id, OPAL_PCI_IODA_TABLE_RESET,
		       OPAL_ASSERT_RESET);
}
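/*
 * Create and initialize a pnv_phb for one IODA PHB device-tree node:
 * map its registers, size the M32/IO segments, allocate the PE arrays
 * and hook the PHB into the generic PCI and MSI infrastructure.
 */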
void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type)
	struct pci_controller *hose;
	static int primary = 1;
	struct pnv_phb *phb;
	unsigned long size, m32map_off, iomap_off, pemap_off;

	pr_info(" Initializing IODA%d OPAL PHB %s\n", ioda_type, np->full_name);

	prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
		pr_err(" Missing \"ibm,opal-phbid\" property !\n");
	phb_id = be64_to_cpup(prop64);
	pr_debug(" PHB-ID : 0x%016llx\n", phb_id);

	phb = alloc_bootmem(sizeof(struct pnv_phb));
	memset(phb, 0, sizeof(struct pnv_phb));
	phb->hose = hose = pcibios_alloc_controller(np);
	if (!phb || !phb->hose) {
		pr_err("PCI: Failed to allocate PCI controller for %s\n",

	spin_lock_init(&phb->lock);
	/* XXX Use device-tree */
	hose->first_busno = 0;
	hose->last_busno = 0xff;
	hose->private_data = phb;
	phb->opal_id = phb_id;
	phb->type = ioda_type;

	/* Detect specific models for error handling */
	if (of_device_is_compatible(np, "ibm,p7ioc-pciex"))
		phb->model = PNV_PHB_MODEL_P7IOC;
	else if (of_device_is_compatible(np, "ibm,power8-pciex"))
		phb->model = PNV_PHB_MODEL_PHB3;
	else
		phb->model = PNV_PHB_MODEL_UNKNOWN;
	/* Parse 32-bit and IO ranges (if any) */
	pci_process_bridge_OF_ranges(phb->hose, np, primary);

	phb->regs = of_iomap(np, 0);
	if (phb->regs == NULL)
		pr_err(" Failed to map registers !\n");

	/* Initialize more IODA stuff */
	prop32 = of_get_property(np, "ibm,opal-num-pes", NULL);
		phb->ioda.total_pe = 1;
		phb->ioda.total_pe = *prop32;

	phb->ioda.m32_size = resource_size(&hose->mem_resources[0]);
	/* FW has already chopped the top 64k off the M32 space (MSI space), add it back */
	phb->ioda.m32_size += 0x10000;

	phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe;
	phb->ioda.m32_pci_base = hose->mem_resources[0].start - hose->mem_offset[0];
	phb->ioda.io_size = hose->pci_io_size;
	phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe;
	phb->ioda.io_pci_base = 0; /* XXX calculate this ? */

	/* Allocate aux data & arrays
	 *
	 * XXX TODO: Don't allocate io segmap on PHB3
	 */
	size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
	size += phb->ioda.total_pe * sizeof(phb->ioda.m32_segmap[0]);
	size += phb->ioda.total_pe * sizeof(phb->ioda.io_segmap[0]);
	size += phb->ioda.total_pe * sizeof(struct pnv_ioda_pe);
	aux = alloc_bootmem(size);
	memset(aux, 0, size);
	phb->ioda.pe_alloc = aux;
	phb->ioda.m32_segmap = aux + m32map_off;
	phb->ioda.io_segmap = aux + iomap_off;
	phb->ioda.pe_array = aux + pemap_off;
	set_bit(0, phb->ioda.pe_alloc);

	INIT_LIST_HEAD(&phb->ioda.pe_dma_list);
	INIT_LIST_HEAD(&phb->ioda.pe_list);

	/* Calculate how many 32-bit TCE segments we have */
	phb->ioda.tce32_count = phb->ioda.m32_pci_base >> 28;

	/* Clear unusable m64 */
	hose->mem_resources[1].flags = 0;
	hose->mem_resources[1].start = 0;
	hose->mem_resources[1].end = 0;
	hose->mem_resources[2].flags = 0;
	hose->mem_resources[2].start = 0;
	hose->mem_resources[2].end = 0;
#if 0 /* We should really do that ... */
	rc = opal_pci_set_phb_mem_window(opal->phb_id,
					 starting_real_address,
					 starting_pci_address,
#endif

	pr_info(" %d PE's M32: 0x%x [segment=0x%x] IO: 0x%x [segment=0x%x]\n",
		phb->ioda.m32_size, phb->ioda.m32_segsize,
		phb->ioda.io_size, phb->ioda.io_segsize);

	phb->hose->ops = &pnv_pci_ops;

	/* Setup RID -> PE mapping function */
	phb->bdfn_to_pe = pnv_ioda_bdfn_to_pe;

	phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;

	/* Setup shutdown function for kexec */
	phb->shutdown = pnv_pci_ioda_shutdown;

	/* Setup MSI support */
	pnv_pci_init_ioda_msis(phb);

	/*
	 * We pass the PCI probe flag PCI_REASSIGN_ALL_RSRC here
	 * to let the PCI core do resource assignment. It is expected
	 * that the PCI core will do correct I/O and MMIO alignment
	 * for the P2P bridge BARs so that each PCI bus (excluding
	 * the child P2P bridges) can form an individual PE.
	 */
	ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;
	ppc_md.pcibios_enable_device_hook = pnv_pci_enable_device_hook;
	ppc_md.pcibios_window_alignment = pnv_pci_window_alignment;
	pci_add_flags(PCI_REASSIGN_ALL_RSRC);

	/* Reset IODA tables to a clean state */
	rc = opal_pci_reset(phb_id, OPAL_PCI_IODA_TABLE_RESET, OPAL_ASSERT_RESET);
		pr_warning(" OPAL Error %ld performing IODA table reset !\n", rc);

	/*
	 * On IODA1 map everything to PE#0, on IODA2 we assume the IODA reset
	 * has cleared the RTT which has the same effect
	 */
	if (ioda_type == PNV_PHB_IODA1)
		opal_pci_set_pe(phb_id, 0, 0, 7, 1, 1, OPAL_MAP_PE);
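/* Entry point for IODA2 (PHB3) PHBs: a thin wrapper around the common init */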
void pnv_pci_init_ioda2_phb(struct device_node *np)
{
	pnv_pci_init_ioda_phb(np, PNV_PHB_IODA2);
}
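/*
 * Probe an IODA1 IO hub: read its "ibm,opal-hubid" and initialize a
 * PHB for every compatible child node.
 */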
void __init pnv_pci_init_ioda_hub(struct device_node *np)
	struct device_node *phbn;

	pr_info("Probing IODA IO-Hub %s\n", np->full_name);

	prop64 = of_get_property(np, "ibm,opal-hubid", NULL);
		pr_err(" Missing \"ibm,opal-hubid\" property !\n");
	hub_id = be64_to_cpup(prop64);
	pr_devel(" HUB-ID : 0x%016llx\n", hub_id);

	/* Count child PHBs */
	for_each_child_of_node(np, phbn) {
		/* Look for IODA1 PHBs */
		if (of_device_is_compatible(phbn, "ibm,ioda-phb"))
			pnv_pci_init_ioda_phb(phbn, PNV_PHB_IODA1);