/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */
#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>

#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/byteorder.h>

#include <gxio/iorpc_globals.h>
#include <gxio/kiorpc.h>
#include <gxio/trio.h>
#include <gxio/iorpc_trio.h>
#include <hv/drv_trio_intf.h>
/*
 * This file contains the routines to search for PCI buses,
 * enumerate the buses, and configure any attached devices.
 */
#define DEBUG_PCI_CFG	0

#if DEBUG_PCI_CFG
#define TRACE_CFG_WR(size, val, bus, dev, func, offset) \
	pr_info("CFG WR %d-byte VAL %#x to bus %d dev %d func %d addr %u\n", \
		size, val, bus, dev, func, offset & 0xFFF);
#define TRACE_CFG_RD(size, val, bus, dev, func, offset) \
	pr_info("CFG RD %d-byte VAL %#x from bus %d dev %d func %d addr %u\n", \
		size, val, bus, dev, func, offset & 0xFFF);
#else
#define TRACE_CFG_WR(...)
#define TRACE_CFG_RD(...)
#endif
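/*
 * For example, with DEBUG_PCI_CFG set to 1, a 4-byte read returning
 * 0x90000000 from offset 0x10 (BAR0) of bus 1, dev 0, func 0 would be
 * traced as (illustrative values only):
 *
 *	CFG RD 4-byte VAL 0x90000000 from bus 1 dev 0 func 0 addr 16
 */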
static int pci_probe = 1;

/* Information on the PCIe RC port configuration. */
static int pcie_rc[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES];
/*
 * On some platforms with one or more Gx endpoint ports, we need to
 * delay the PCIe RC port probe for a few seconds to work around
 * a HW PCIe link-training bug. The exact delay is specified with
 * a kernel boot argument in the form of "pcie_rc_delay=T,P,S",
 * where T is the TRIO instance number, P is the port number and S is
 * the delay in seconds. If the delay is not provided, the value
 * will be DEFAULT_RC_DELAY.
 */
static int rc_delay[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES];
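/*
 * For example, booting with "pcie_rc_delay=0,1,15" would delay the RC
 * probe of port 1 on TRIO 0 by 15 seconds (illustrative values; see
 * setup_pcie_rc_delay() below).
 */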
/* Default number of seconds that the PCIe RC port probe can be delayed. */
#define DEFAULT_RC_DELAY	10

/* Max number of seconds that the PCIe RC port probe can be delayed. */
#define MAX_RC_DELAY		20

/* Array of the PCIe port configuration info obtained from the BIB (Board Information Block). */
struct pcie_port_property pcie_ports[TILEGX_NUM_TRIO][TILEGX_TRIO_PCIES];

/* All drivers share the TRIO contexts defined here. */
gxio_trio_context_t trio_contexts[TILEGX_NUM_TRIO];

/* Array of PCIe RC controllers. */
struct pci_controller pci_controllers[TILEGX_NUM_TRIO * TILEGX_TRIO_PCIES];
int num_rc_controllers;
static int num_ep_controllers;

static struct pci_ops tile_cfg_ops;

/* Mask of CPUs that should receive PCIe interrupts. */
static struct cpumask intr_cpus_map;
/*
 * We don't need to worry about the alignment of resources.
 */
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	return res->start;
}
EXPORT_SYMBOL(pcibios_align_resource);
/*
 * Pick a CPU to receive and handle the PCIe interrupts, based on the IRQ #.
 * For now, we simply send interrupts to non-dataplane CPUs.
 * We may implement methods to allow the user to specify the target CPUs,
 * e.g. via boot arguments.
 */
static int tile_irq_cpu(int irq)
{
	unsigned int count;
	int i = 0, cpu;

	count = cpumask_weight(&intr_cpus_map);
	if (unlikely(count == 0)) {
		pr_warning("intr_cpus_map empty, interrupts will be delivered to dataplane tiles\n");
		return irq % (smp_height * smp_width);
	}

	count = irq % count;
	for_each_cpu(cpu, &intr_cpus_map) {
		if (i++ == count)
			break;
	}
	return cpu;
}
/*
 * Open a file descriptor to the TRIO shim.
 */
static int tile_pcie_open(int trio_index)
{
	gxio_trio_context_t *context = &trio_contexts[trio_index];
	int ret;

	/*
	 * This opens a file descriptor to the TRIO shim.
	 */
	ret = gxio_trio_init(context, trio_index);
	if (ret < 0)
		return ret;

	/*
	 * Allocate an ASID for the kernel.
	 */
	ret = gxio_trio_alloc_asids(context, 1, 0, 0);
	if (ret < 0) {
		pr_err("PCI: ASID alloc failure on TRIO %d, give up\n",
			trio_index);
		goto asid_alloc_failure;
	}

	context->asid = ret;

#ifdef USE_SHARED_PCIE_CONFIG_REGION
	/*
	 * Alloc a PIO region for config access, shared by all MACs per TRIO.
	 * This shouldn't fail since the kernel is supposed to be the first
	 * client of the TRIO's PIO regions.
	 */
	ret = gxio_trio_alloc_pio_regions(context, 1, 0, 0);
	if (ret < 0) {
		pr_err("PCI: CFG PIO alloc failure on TRIO %d, give up\n",
			trio_index);
		goto pio_alloc_failure;
	}

	context->pio_cfg_index = ret;

	/*
	 * For PIO CFG, the bus_address_hi parameter is 0. The mac parameter
	 * is also 0 because it is specified in PIO_REGION_SETUP_CFG_ADDR.
	 */
	ret = gxio_trio_init_pio_region_aux(context, context->pio_cfg_index,
		0, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE);
	if (ret < 0) {
		pr_err("PCI: CFG PIO init failure on TRIO %d, give up\n",
			trio_index);
		goto pio_alloc_failure;
	}
#endif

	return ret;

#ifdef USE_SHARED_PCIE_CONFIG_REGION
pio_alloc_failure:
#endif
asid_alloc_failure:
	hv_dev_close(context->fd);

	return ret;
}
static void
tilegx_legacy_irq_ack(struct irq_data *d)
{
	__insn_mtspr(SPR_IPI_EVENT_RESET_K, 1UL << d->irq);
}

static void
tilegx_legacy_irq_mask(struct irq_data *d)
{
	__insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq);
}

static void
tilegx_legacy_irq_unmask(struct irq_data *d)
{
	__insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq);
}

static struct irq_chip tilegx_legacy_irq_chip = {
	.name		= "tilegx_legacy_irq",
	.irq_ack	= tilegx_legacy_irq_ack,
	.irq_mask	= tilegx_legacy_irq_mask,
	.irq_unmask	= tilegx_legacy_irq_unmask,

	/* TBD: support set_affinity. */
};
/*
 * This is a wrapper around the kernel's level-triggered interrupt
 * handler handle_level_irq() for PCI legacy interrupts. The TRIO
 * is configured such that only INTx Assert interrupts are proxied
 * to Linux, which just calls handle_level_irq() after clearing the
 * MAC INTx Assert status bit associated with this interrupt.
 */
static void
trio_handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
	struct pci_controller *controller = irq_desc_get_handler_data(desc);
	gxio_trio_context_t *trio_context = controller->trio;
	uint64_t intx = (uint64_t)irq_desc_get_chip_data(desc);
	int mac = controller->mac;
	unsigned int reg_offset;
	uint64_t level_mask;

	handle_level_irq(irq, desc);

	/*
	 * Clear the INTx Level status, otherwise future interrupts are
	 * not sent.
	 */
	reg_offset = (TRIO_PCIE_INTFC_MAC_INT_STS <<
		TRIO_CFG_REGION_ADDR__REG_SHIFT) |
		(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
		TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
		(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

	level_mask = TRIO_PCIE_INTFC_MAC_INT_STS__INT_LEVEL_MASK << intx;

	__gxio_mmio_write(trio_context->mmio_base_mac + reg_offset, level_mask);
}
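/*
 * The reg_offset computation above follows a pattern used throughout
 * this file: a register offset, an interface selector and a MAC selector
 * are packed into a single MMIO offset. As a sketch (not part of the
 * original driver), it could be factored as:
 *
 *	static inline unsigned int
 *	trio_mac_reg(unsigned int reg, unsigned int intfc, int mac)
 *	{
 *		return (reg << TRIO_CFG_REGION_ADDR__REG_SHIFT) |
 *		       (intfc << TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
 *		       (mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
 *	}
 */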
/*
 * Create kernel irqs and set up the handlers for the legacy interrupts.
 * Also perform the minimum initialization for MSI support.
 */
static int tile_init_irqs(struct pci_controller *controller)
{
	int i;
	int j;
	int irq;
	int result;

	cpumask_copy(&intr_cpus_map, cpu_online_mask);

	for (i = 0; i < 4; i++) {
		gxio_trio_context_t *context = controller->trio;
		int cpu;

		/* Ask the kernel to allocate an IRQ. */
		irq = create_irq();
		if (irq < 0) {
			pr_err("PCI: no free irq vectors, failed for %d\n", i);
			goto free_irqs;
		}
		controller->irq_intx_table[i] = irq;

		/* Distribute the 4 IRQs to different tiles. */
		cpu = tile_irq_cpu(irq);

		/* Configure the TRIO intr binding for this IRQ. */
		result = gxio_trio_config_legacy_intr(context, cpu_x(cpu),
						      cpu_y(cpu), KERNEL_PL,
						      irq, controller->mac, i);
		if (result < 0) {
			pr_err("PCI: MAC intx config failed for %d\n", i);

			goto free_irqs;
		}

		/*
		 * Register the IRQ handler with the kernel.
		 */
		irq_set_chip_and_handler(irq, &tilegx_legacy_irq_chip,
					 trio_handle_level_irq);
		irq_set_chip_data(irq, (void *)(uint64_t)i);
		irq_set_handler_data(irq, controller);
	}

	return 0;

free_irqs:
	for (j = 0; j < i; j++)
		destroy_irq(controller->irq_intx_table[j]);

	return -1;
}
/*
 * Find valid controllers and fill in pci_controller structs for each
 * of them.
 *
 * Returns the number of controllers discovered.
 */
int __init tile_pci_init(void)
{
	int num_trio_shims = 0;
	int ctl_index = 0;
	int i, j;

	if (!pci_probe) {
		pr_info("PCI: disabled by boot argument\n");
		return 0;
	}

	pr_info("PCI: Searching for controllers...\n");

	/*
	 * We loop over all the TRIO shims.
	 */
	for (i = 0; i < TILEGX_NUM_TRIO; i++) {
		int ret;

		ret = tile_pcie_open(i);
		if (ret < 0)
			continue;

		num_trio_shims++;
	}

	if (num_trio_shims == 0 || sim_is_simulator())
		return 0;

	/*
	 * Now determine which PCIe ports are configured to operate in RC mode.
	 * We look at the Board Information Block first and then check for any
	 * overriding configuration by the HW strapping pins.
	 */
	for (i = 0; i < TILEGX_NUM_TRIO; i++) {
		gxio_trio_context_t *context = &trio_contexts[i];
		int ret;

		if (context->fd < 0)
			continue;

		ret = hv_dev_pread(context->fd, 0,
			(HV_VirtAddr)&pcie_ports[i][0],
			sizeof(struct pcie_port_property) * TILEGX_TRIO_PCIES,
			GXIO_TRIO_OP_GET_PORT_PROPERTY);
		if (ret < 0) {
			pr_err("PCI: PCIE_GET_PORT_PROPERTY failure, error %d, on TRIO %d\n",
				ret, i);
			continue;
		}

		for (j = 0; j < TILEGX_TRIO_PCIES; j++) {
			if (pcie_ports[i][j].allow_rc) {
				pcie_rc[i][j] = 1;
				num_rc_controllers++;
			} else if (pcie_ports[i][j].allow_ep) {
				num_ep_controllers++;
			}
		}
	}

	/*
	 * Return if no PCIe ports are configured to operate in RC mode.
	 */
	if (num_rc_controllers == 0)
		return 0;

	/*
	 * Set the TRIO pointer and MAC index for each PCIe RC port.
	 */
	for (i = 0; i < TILEGX_NUM_TRIO; i++) {
		for (j = 0; j < TILEGX_TRIO_PCIES; j++) {
			if (pcie_rc[i][j]) {
				pci_controllers[ctl_index].trio =
					&trio_contexts[i];
				pci_controllers[ctl_index].mac = j;
				pci_controllers[ctl_index].trio_index = i;
				ctl_index++;
				if (ctl_index == num_rc_controllers)
					goto out;
			}
		}
	}

out:
	/*
	 * Configure each PCIe RC port.
	 */
	for (i = 0; i < num_rc_controllers; i++) {
		/*
		 * Configure the PCIe MAC to run in RC mode.
		 */
		struct pci_controller *controller = &pci_controllers[i];

		controller->index = i;
		controller->ops = &tile_cfg_ops;

		/*
		 * The PCI memory resource is located above the PA space.
		 * For every host bridge, the BAR window or the MMIO aperture
		 * is in range [3GB, 4GB - 1] of a 4GB space beyond the
		 * PA space.
		 */
		controller->mem_offset = TILE_PCI_MEM_START +
			(i * TILE_PCI_BAR_WINDOW_TOP);
		controller->mem_space.start = controller->mem_offset +
			TILE_PCI_BAR_WINDOW_TOP - TILE_PCI_BAR_WINDOW_SIZE;
		controller->mem_space.end = controller->mem_offset +
			TILE_PCI_BAR_WINDOW_TOP - 1;
		controller->mem_space.flags = IORESOURCE_MEM;
		snprintf(controller->mem_space_name,
			 sizeof(controller->mem_space_name),
			 "PCI mem domain %d", i);
		controller->mem_space.name = controller->mem_space_name;
	}

	return num_rc_controllers;
}
/*
 * (pin - 1) converts from the PCI standard's [1:4] convention to
 * a normal [0:3] range.
 */
static int tile_map_irq(const struct pci_dev *dev, u8 device, u8 pin)
{
	struct pci_controller *controller =
		(struct pci_controller *)dev->sysdata;
	return controller->irq_intx_table[pin - 1];
}
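/*
 * For example, a device asserting INTA (pin 1) on this controller is
 * routed to irq_intx_table[0], which tile_init_irqs() set up above.
 */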
static void fixup_read_and_payload_sizes(struct pci_controller *controller)
{
	gxio_trio_context_t *trio_context = controller->trio;
	struct pci_bus *root_bus = controller->root_bus;
	TRIO_PCIE_RC_DEVICE_CONTROL_t dev_control;
	TRIO_PCIE_RC_DEVICE_CAP_t rc_dev_cap;
	unsigned int reg_offset;
	struct pci_bus *child;
	int mac;
	int err;

	mac = controller->mac;

	/*
	 * Set our max read request size to be 4KB.
	 */
	reg_offset =
		(TRIO_PCIE_RC_DEVICE_CONTROL <<
			TRIO_CFG_REGION_ADDR__REG_SHIFT) |
		(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
			TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
		(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

	dev_control.word = __gxio_mmio_read32(trio_context->mmio_base_mac +
					      reg_offset);
	dev_control.max_read_req_sz = 5;
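	/*
	 * Per the PCIe spec, this field encodes the max read request size
	 * as 128 << N bytes, so the value 5 corresponds to the 4KB
	 * mentioned in the comment above.
	 */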
	__gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset,
			    dev_control.word);

	/*
	 * Set the max payload size supported by this Gx PCIe MAC.
	 * Though Gx PCIe supports Max Payload Size of up to 1024 bytes,
	 * experiments have shown that setting MPS to 256 yields the
	 * best performance.
	 */
	reg_offset =
		(TRIO_PCIE_RC_DEVICE_CAP <<
			TRIO_CFG_REGION_ADDR__REG_SHIFT) |
		(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
			TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
		(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

	rc_dev_cap.word = __gxio_mmio_read32(trio_context->mmio_base_mac +
					     reg_offset);
	rc_dev_cap.mps_sup = 1;
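	/*
	 * mps_sup uses the same 128 << N encoding, so 1 corresponds to the
	 * 256-byte Max Payload Size recommended in the comment above.
	 */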
	__gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset,
			    rc_dev_cap.word);

	/* Configure PCI Express MPS setting. */
	list_for_each_entry(child, &root_bus->children, node)
		pcie_bus_configure_settings(child);

	/*
	 * Set the mac_config register in trio based on the MPS/MRS of the link.
	 */
	reg_offset =
		(TRIO_PCIE_RC_DEVICE_CONTROL <<
			TRIO_CFG_REGION_ADDR__REG_SHIFT) |
		(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
			TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
		(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

	dev_control.word = __gxio_mmio_read32(trio_context->mmio_base_mac +
					      reg_offset);

	err = gxio_trio_set_mps_mrs(trio_context,
				    dev_control.max_payload_size,
				    dev_control.max_read_req_sz,
				    controller->mac);
	if (err < 0)
		pr_err("PCI: PCIE_CONFIGURE_MAC_MPS_MRS failure, MAC %d on TRIO %d\n",
			mac, controller->trio_index);
}
static int setup_pcie_rc_delay(char *str)
{
	unsigned long delay = 0;
	unsigned long trio_index;
	unsigned long mac;

	if (str == NULL || !isdigit(*str))
		return -EINVAL;
	trio_index = simple_strtoul(str, (char **)&str, 10);
	if (trio_index >= TILEGX_NUM_TRIO)
		return -EINVAL;

	if (*str != ',')
		return -EINVAL;

	str++;
	if (!isdigit(*str))
		return -EINVAL;
	mac = simple_strtoul(str, (char **)&str, 10);
	if (mac >= TILEGX_TRIO_PCIES)
		return -EINVAL;

	if (*str != '\0') {
		if (*str != ',')
			return -EINVAL;

		str++;
		if (!isdigit(*str))
			return -EINVAL;
		delay = simple_strtoul(str, (char **)&str, 10);
		if (delay > MAX_RC_DELAY)
			return -EINVAL;
	}

	rc_delay[trio_index][mac] = delay ? : DEFAULT_RC_DELAY;
	pr_info("Delaying PCIe RC link training for %d sec on MAC %lu on TRIO %lu\n",
		rc_delay[trio_index][mac], mac, trio_index);
	return 0;
}
early_param("pcie_rc_delay", setup_pcie_rc_delay);
/*
 * PCI initialization entry point, called by subsys_initcall.
 */
int __init pcibios_init(void)
{
	resource_size_t offset;
	LIST_HEAD(resources);
	int next_busno;
	int i;

	if (num_rc_controllers == 0 && num_ep_controllers == 0)
		return 0;

	/*
	 * We loop over all the TRIO shims and set up the MMIO mappings.
	 */
	for (i = 0; i < TILEGX_NUM_TRIO; i++) {
		gxio_trio_context_t *context = &trio_contexts[i];

		if (context->fd < 0)
			continue;

		/*
		 * Map in the MMIO space for the MAC.
		 */
		offset = 0;
		context->mmio_base_mac =
			iorpc_ioremap(context->fd, offset,
				      HV_TRIO_CONFIG_IOREMAP_SIZE);
		if (context->mmio_base_mac == NULL) {
			pr_err("PCI: MAC map failure on TRIO %d\n", i);

			hv_dev_close(context->fd);
			context->fd = -1;
			continue;
		}
	}

	/*
	 * Delay a bit in case devices aren't ready. Some devices are
	 * known to require at least 20ms here, but we use a more
	 * conservative value.
	 */
	msleep(250);
	/* Scan all of the recorded PCI controllers. */
	for (next_busno = 0, i = 0; i < num_rc_controllers; i++) {
		struct pci_controller *controller = &pci_controllers[i];
		gxio_trio_context_t *trio_context = controller->trio;
		TRIO_PCIE_INTFC_PORT_CONFIG_t port_config;
		TRIO_PCIE_INTFC_PORT_STATUS_t port_status;
		TRIO_PCIE_INTFC_TX_FIFO_CTL_t tx_fifo_ctl;
		struct pci_bus *bus;
		unsigned int reg_offset;
		unsigned int class_code_revision;
		int trio_index;
		int mac;
		int ret;

		if (trio_context->fd < 0)
			continue;

		trio_index = controller->trio_index;
		mac = controller->mac;

		/*
		 * Check the port strap state which will override the BIB
		 * setting.
		 */
		reg_offset =
			(TRIO_PCIE_INTFC_PORT_CONFIG <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
			(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
				TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
			(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

		port_config.word =
			__gxio_mmio_read(trio_context->mmio_base_mac +
					 reg_offset);

		if ((port_config.strap_state !=
			TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC) &&
			(port_config.strap_state !=
			TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_RC_G1)) {
			/*
			 * If this is really intended to be an EP port,
			 * record it so that the endpoint driver will know about it.
			 */
			if (port_config.strap_state ==
			TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT ||
			port_config.strap_state ==
			TRIO_PCIE_INTFC_PORT_CONFIG__STRAP_STATE_VAL_AUTO_CONFIG_ENDPOINT_G1)
				pcie_ports[trio_index][mac].allow_ep = 1;

			continue;
		}

		/*
		 * Delay the RC link training if needed.
		 */
		if (rc_delay[trio_index][mac])
			msleep(rc_delay[trio_index][mac] * 1000);

		ret = gxio_trio_force_rc_link_up(trio_context, mac);
		if (ret < 0)
			pr_err("PCI: PCIE_FORCE_LINK_UP failure, MAC %d on TRIO %d\n",
				mac, trio_index);

		pr_info("PCI: Found PCI controller #%d on TRIO %d MAC %d\n", i,
			trio_index, controller->mac);
		/*
		 * Wait a bit here because some EP devices take longer
		 * to come up.
		 */
		msleep(1000);

		/*
		 * Check for PCIe link-up status.
		 */
		reg_offset =
			(TRIO_PCIE_INTFC_PORT_STATUS <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
			(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
				TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
			(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

		port_status.word =
			__gxio_mmio_read(trio_context->mmio_base_mac +
					 reg_offset);
		if (!port_status.dl_up) {
			pr_err("PCI: link is down, MAC %d on TRIO %d\n",
				mac, trio_index);
			continue;
		}

		/*
		 * Ensure that the link can come out of L1 power down state.
		 * Strictly speaking, this is needed only in the case of
		 * heavy RC-initiated DMAs.
		 */
		reg_offset =
			(TRIO_PCIE_INTFC_TX_FIFO_CTL <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
			(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_INTERFACE <<
				TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
			(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);
		tx_fifo_ctl.word =
			__gxio_mmio_read(trio_context->mmio_base_mac +
					 reg_offset);
		tx_fifo_ctl.min_p_credits = 0;
		__gxio_mmio_write(trio_context->mmio_base_mac + reg_offset,
				  tx_fifo_ctl.word);
		/*
		 * Change the device ID so that the Linux bus crawl doesn't
		 * confuse the internal bridge with any Tilera endpoints.
		 */
		reg_offset =
			(TRIO_PCIE_RC_DEVICE_ID_VEN_ID <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
			(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
				TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
			(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

		__gxio_mmio_write32(trio_context->mmio_base_mac + reg_offset,
				    (TILERA_GX36_RC_DEV_ID <<
				    TRIO_PCIE_RC_DEVICE_ID_VEN_ID__DEV_ID_SHIFT) |
				    TILERA_VENDOR_ID);

		/*
		 * Set the internal P2P bridge class code.
		 */
		reg_offset =
			(TRIO_PCIE_RC_REVISION_ID <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
			(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_STANDARD <<
				TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
			(mac << TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

		class_code_revision =
			__gxio_mmio_read32(trio_context->mmio_base_mac +
					   reg_offset);
		class_code_revision = (class_code_revision & 0xff) |
			(PCI_CLASS_BRIDGE_PCI << 16);

		__gxio_mmio_write32(trio_context->mmio_base_mac +
				    reg_offset, class_code_revision);
#ifdef USE_SHARED_PCIE_CONFIG_REGION
		/*
		 * Map in the MMIO space for the PIO region.
		 */
		offset = HV_TRIO_PIO_OFFSET(trio_context->pio_cfg_index) |
			(((unsigned long long)mac) <<
			TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT);
#else
		/*
		 * Alloc a PIO region for PCI config access per MAC.
		 */
		ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
		if (ret < 0) {
			pr_err("PCI: PCI CFG PIO alloc failure for mac %d on TRIO %d, give up\n",
				mac, trio_index);

			continue;
		}

		trio_context->pio_cfg_index[mac] = ret;

		/*
		 * For PIO CFG, the bus_address_hi parameter is 0.
		 */
		ret = gxio_trio_init_pio_region_aux(trio_context,
			trio_context->pio_cfg_index[mac],
			mac, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE);
		if (ret < 0) {
			pr_err("PCI: PCI CFG PIO init failure for mac %d on TRIO %d, give up\n",
				mac, trio_index);

			continue;
		}

		offset = HV_TRIO_PIO_OFFSET(trio_context->pio_cfg_index[mac]) |
			(((unsigned long long)mac) <<
			TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT);
#endif

		trio_context->mmio_base_pio_cfg[mac] =
			iorpc_ioremap(trio_context->fd, offset, (1 <<
			TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR__MAC_SHIFT));
		if (trio_context->mmio_base_pio_cfg[mac] == NULL) {
			pr_err("PCI: PIO map failure for mac %d on TRIO %d\n",
				mac, trio_index);

			continue;
		}
		/*
		 * Initialize the PCIe interrupts.
		 */
		if (tile_init_irqs(controller)) {
			pr_err("PCI: IRQs init failure for mac %d on TRIO %d\n",
				mac, trio_index);

			continue;
		}

		/*
		 * The PCI memory resource is located above the PA space.
		 * The memory range for the PCI root bus should not overlap
		 * with the physical RAM.
		 */
		pci_add_resource_offset(&resources, &controller->mem_space,
					controller->mem_offset);

		controller->first_busno = next_busno;
		bus = pci_scan_root_bus(NULL, next_busno, controller->ops,
					controller, &resources);
		controller->root_bus = bus;
		next_busno = bus->busn_res.end + 1;
	}
	/* Do machine dependent PCI interrupt routing */
	pci_fixup_irqs(pci_common_swizzle, tile_map_irq);

	/*
	 * This comes from the generic Linux PCI driver.
	 *
	 * It allocates all of the resources (I/O memory, etc)
	 * associated with the devices read in above.
	 */
	pci_assign_unassigned_resources();
	/* Record the I/O resources in the PCI controller structure. */
	for (i = 0; i < num_rc_controllers; i++) {
		struct pci_controller *controller = &pci_controllers[i];
		gxio_trio_context_t *trio_context = controller->trio;
		struct pci_bus *root_bus = pci_controllers[i].root_bus;
		struct pci_bus *next_bus;
		uint32_t bus_address_hi;
		struct pci_dev *dev;
		int ret;
		int j;

		/*
		 * Skip controllers that are not properly initialized or
		 * have down links.
		 */
		if (root_bus == NULL)
			continue;

		/* Configure the max_payload_size values for this domain. */
		fixup_read_and_payload_sizes(controller);

		list_for_each_entry(dev, &root_bus->devices, bus_list) {
			/* Find the PCI host controller, ie. the 1st bridge. */
			if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
				(PCI_SLOT(dev->devfn) == 0)) {
				next_bus = dev->subordinate;
				pci_controllers[i].mem_resources[0] =
					*next_bus->resource[0];
				pci_controllers[i].mem_resources[1] =
					*next_bus->resource[1];
				pci_controllers[i].mem_resources[2] =
					*next_bus->resource[2];

				break;
			}
		}

		if (pci_controllers[i].mem_resources[1].flags & IORESOURCE_MEM)
			bus_address_hi =
				pci_controllers[i].mem_resources[1].start >> 32;
		else if (pci_controllers[i].mem_resources[2].flags & IORESOURCE_PREFETCH)
			bus_address_hi =
				pci_controllers[i].mem_resources[2].start >> 32;
		else {
			/* This is unlikely. */
			pr_err("PCI: no memory resources on TRIO %d mac %d\n",
				controller->trio_index, controller->mac);
			continue;
		}
		/*
		 * Alloc a PIO region for PCI memory access for each RC port.
		 */
		ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
		if (ret < 0) {
			pr_err("PCI: MEM PIO alloc failure on TRIO %d mac %d, give up\n",
				controller->trio_index, controller->mac);

			continue;
		}

		controller->pio_mem_index = ret;

		/*
		 * For PIO MEM, the bus_address_hi parameter is hard-coded 0
		 * because we always assign 32-bit PCI bus BAR ranges.
		 */
		ret = gxio_trio_init_pio_region_aux(trio_context,
						    controller->pio_mem_index,
						    controller->mac, 0, 0);
		if (ret < 0) {
			pr_err("PCI: MEM PIO init failure on TRIO %d mac %d, give up\n",
				controller->trio_index, controller->mac);

			continue;
		}

		/*
		 * Configure a Mem-Map region for each memory controller so
		 * that Linux can map all of its PA space to the PCI bus.
		 * Use the IOMMU to handle hash-for-home memory.
		 */
		for_each_online_node(j) {
			unsigned long start_pfn = node_start_pfn[j];
			unsigned long end_pfn = node_end_pfn[j];
			unsigned long nr_pages = end_pfn - start_pfn;

			ret = gxio_trio_alloc_memory_maps(trio_context, 1, 0,
							  0);
			if (ret < 0) {
				pr_err("PCI: Mem-Map alloc failure on TRIO %d mac %d for MC %d, give up\n",
					controller->trio_index,
					controller->mac, j);

				goto alloc_mem_map_failed;
			}

			controller->mem_maps[j] = ret;

			/*
			 * Initialize the Mem-Map and the I/O MMU so that all
			 * the physical memory can be accessed by the endpoint
			 * devices. The base bus address is set to the base CPA
			 * of this memory controller plus an offset (see pci.h).
			 * The region's base VA is set to the base CPA. The
			 * I/O MMU table essentially translates the CPA to
			 * the real PA. Implicitly, for node 0, we create
			 * a separate Mem-Map region that serves as the inbound
			 * window for legacy 32-bit devices. This is a direct
			 * map of the low 4GB CPA space.
			 */
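			/*
			 * Concretely: an endpoint's DMA to bus address
			 * (CPA + TILE_PCI_MEM_MAP_BASE_OFFSET) is translated
			 * by this Mem-Map back to the CPA, which the I/O MMU
			 * then maps to the real PA.
			 */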
			ret = gxio_trio_init_memory_map_mmu_aux(trio_context,
				controller->mem_maps[j],
				start_pfn << PAGE_SHIFT,
				nr_pages << PAGE_SHIFT,
				trio_context->asid,
				controller->mac,
				(start_pfn << PAGE_SHIFT) +
				TILE_PCI_MEM_MAP_BASE_OFFSET,
				j,
				GXIO_TRIO_ORDER_MODE_UNORDERED);
			if (ret < 0) {
				pr_err("PCI: Mem-Map init failure on TRIO %d mac %d for MC %d, give up\n",
					controller->trio_index,
					controller->mac, j);

				goto alloc_mem_map_failed;
			}
			continue;

alloc_mem_map_failed:
			break;
		}
	}

	return 0;
}
subsys_initcall(pcibios_init);
/* Note: to be deleted after Linux 3.6 merge. */
void pcibios_fixup_bus(struct pci_bus *bus)
{
}

/*
 * This can be called from the generic PCI layer, but doesn't need to
 * do anything.
 */
char *pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		pci_probe = 0;
		return NULL;
	}

	return str;
}

/*
 * Enable memory address decoding, as appropriate, for the
 * device described by the 'dev' struct. The I/O decoding
 * is disabled, though the TILE-Gx supports I/O addressing.
 *
 * This is called from the generic PCI layer, and can be called
 * for bridges or endpoints.
 */
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	return pci_enable_resources(dev, mask);
}
/* Called for each device after PCI setup is done. */
static void pcibios_fixup_final(struct pci_dev *pdev)
{
	set_dma_ops(&pdev->dev, gx_pci_dma_map_ops);
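	/*
	 * The offset below makes device-visible DMA addresses match the
	 * Mem-Map inbound windows set up in pcibios_init(): bus address =
	 * CPA + TILE_PCI_MEM_MAP_BASE_OFFSET (see the Mem-Map comment there).
	 */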
	set_dma_offset(&pdev->dev, TILE_PCI_MEM_MAP_BASE_OFFSET);
	pdev->dev.archdata.max_direct_dma_addr =
		TILE_PCI_MAX_DIRECT_DMA_ADDRESS;
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_final);
/* Map a PCI MMIO bus address into VA space. */
void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
{
	struct pci_controller *controller = NULL;
	resource_size_t bar_start;
	resource_size_t bar_end;
	resource_size_t offset;
	resource_size_t start;
	resource_size_t end;
	int trio_fd;
	int i, j;

	start = phys_addr;
	end = phys_addr + size - 1;

	/*
	 * In the following, each PCI controller's mem_resources[1]
	 * represents its (non-prefetchable) PCI memory resource and
	 * mem_resources[2] refers to its prefetchable PCI memory resource.
	 * By searching phys_addr in each controller's mem_resources[], we can
	 * determine the controller that should accept the PCI memory access.
	 */
	for (i = 0; i < num_rc_controllers; i++) {
		/*
		 * Skip controllers that are not properly initialized or
		 * have down links.
		 */
		if (pci_controllers[i].root_bus == NULL)
			continue;

		for (j = 1; j < 3; j++) {
			bar_start =
				pci_controllers[i].mem_resources[j].start;
			bar_end =
				pci_controllers[i].mem_resources[j].end;

			if ((start >= bar_start) && (end <= bar_end)) {
				controller = &pci_controllers[i];
				break;
			}
		}

		if (controller != NULL)
			break;
	}

	if (controller == NULL)
		return NULL;

	trio_fd = controller->trio->fd;

	/* Convert the resource start to the bus address offset. */
	start = phys_addr - controller->mem_offset;

	offset = HV_TRIO_PIO_OFFSET(controller->pio_mem_index) + start;

	/*
	 * We need to keep the PCI bus address's in-page offset in the VA.
	 */
	return iorpc_ioremap(trio_fd, offset, size) +
		(phys_addr & (PAGE_SIZE - 1));
}
EXPORT_SYMBOL(ioremap);
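/*
 * Typical caller (a sketch, not from this file): a driver mapping BAR 0
 * of its device gets a VA whose loads/stores go through the PIO region
 * found above:
 *
 *	void __iomem *regs = ioremap(pci_resource_start(pdev, 0),
 *				     pci_resource_len(pdev, 0));
 */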
void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
{
	iounmap(addr);
}
EXPORT_SYMBOL(pci_iounmap);
/****************************************************************
 *
 *		Tile PCI config space read/write routines
 *
 ****************************************************************/

/*
 * These are the normal read and write ops.
 * These are expanded with macros from pci_bus_read_config_byte() etc.
 *
 * devfn is the combined PCI device & function.
 *
 * offset is in bytes, from the start of config space for the
 * specified bus & device.
 */
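/*
 * For reference, devfn packs the two as ((device << 3) | function),
 * which the PCI_SLOT() and PCI_FUNC() macros below unpack.
 */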
static int tile_cfg_read(struct pci_bus *bus, unsigned int devfn, int offset,
			 int size, u32 *val)
{
	struct pci_controller *controller = bus->sysdata;
	gxio_trio_context_t *trio_context = controller->trio;
	int busnum = bus->number & 0xff;
	int device = PCI_SLOT(devfn);
	int function = PCI_FUNC(devfn);
	int config_type = 1;
	TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR_t cfg_addr;
	void *mmio_addr;

	/*
	 * Map all accesses to the local device on root bus into the
	 * MMIO space of the MAC. Accesses to the downstream devices
	 * go to the PIO space.
	 */
	if (pci_is_root_bus(bus)) {
		if (device == 0) {
			/*
			 * This is the internal downstream P2P bridge,
			 * access directly.
			 */
			unsigned int reg_offset;

			reg_offset = ((offset & 0xFFF) <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
				(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_PROTECTED
				<< TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
				(controller->mac <<
					TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

			mmio_addr = trio_context->mmio_base_mac + reg_offset;

			goto valid_device;
		} else {
			/*
			 * We fake an empty device for (device > 0),
			 * since there is only one device on bus 0.
			 */
			goto invalid_device;
		}
	}

	/*
	 * Accesses to the directly attached device have to be
	 * sent as type-0 configs.
	 */
	if (busnum == (controller->first_busno + 1)) {
		/*
		 * There is only one device off of our built-in P2P bridge.
		 */
		if (device > 0)
			goto invalid_device;

		config_type = 0;
	}

	cfg_addr.word = 0;
	cfg_addr.reg_addr = (offset & 0xFFF);
	cfg_addr.fn = function;
	cfg_addr.dev = device;
	cfg_addr.bus = busnum;
	cfg_addr.type = config_type;
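	/*
	 * For reference: type-0 config requests address a device on the
	 * directly attached bus segment, while type-1 requests carry a bus
	 * number and are forwarded by P2P bridges until they reach that bus.
	 */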
	/*
	 * Note that we don't set the mac field in cfg_addr because the
	 * mapping is per port.
	 */
	mmio_addr = trio_context->mmio_base_pio_cfg[controller->mac] +
		cfg_addr.word;

valid_device:
	switch (size) {
	case 4:
		*val = __gxio_mmio_read32(mmio_addr);
		break;
	case 2:
		*val = __gxio_mmio_read16(mmio_addr);
		break;
	case 1:
		*val = __gxio_mmio_read8(mmio_addr);
		break;
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	TRACE_CFG_RD(size, *val, busnum, device, function, offset);

	return 0;

invalid_device:
	/* Fake an all-ones read for the nonexistent device. */
	switch (size) {
	case 4:
		*val = 0xFFFFFFFF;
		break;
	case 2:
		*val = 0xFFFF;
		break;
	case 1:
		*val = 0xFF;
		break;
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	return 0;
}
/*
 * See tile_cfg_read() for relevant comments.
 * Note that "val" is the value to write, not a pointer to that value.
 */
static int tile_cfg_write(struct pci_bus *bus, unsigned int devfn, int offset,
			  int size, u32 val)
{
	struct pci_controller *controller = bus->sysdata;
	gxio_trio_context_t *trio_context = controller->trio;
	int busnum = bus->number & 0xff;
	int device = PCI_SLOT(devfn);
	int function = PCI_FUNC(devfn);
	int config_type = 1;
	TRIO_TILE_PIO_REGION_SETUP_CFG_ADDR_t cfg_addr;
	void *mmio_addr;
	u32 val_32 = (u32)val;
	u16 val_16 = (u16)val;
	u8 val_8 = (u8)val;

	/*
	 * Map all accesses to the local device on root bus into the
	 * MMIO space of the MAC. Accesses to the downstream devices
	 * go to the PIO space.
	 */
	if (pci_is_root_bus(bus)) {
		if (device == 0) {
			/*
			 * This is the internal downstream P2P bridge,
			 * access directly.
			 */
			unsigned int reg_offset;

			reg_offset = ((offset & 0xFFF) <<
				TRIO_CFG_REGION_ADDR__REG_SHIFT) |
				(TRIO_CFG_REGION_ADDR__INTFC_VAL_MAC_PROTECTED
				<< TRIO_CFG_REGION_ADDR__INTFC_SHIFT) |
				(controller->mac <<
					TRIO_CFG_REGION_ADDR__MAC_SEL_SHIFT);

			mmio_addr = trio_context->mmio_base_mac + reg_offset;

			goto valid_device;
		} else {
			/*
			 * We fake an empty device for (device > 0),
			 * since there is only one device on bus 0.
			 */
			goto invalid_device;
		}
	}

	/*
	 * Accesses to the directly attached device have to be
	 * sent as type-0 configs.
	 */
	if (busnum == (controller->first_busno + 1)) {
		/*
		 * There is only one device off of our built-in P2P bridge.
		 */
		if (device > 0)
			goto invalid_device;

		config_type = 0;
	}

	cfg_addr.word = 0;
	cfg_addr.reg_addr = (offset & 0xFFF);
	cfg_addr.fn = function;
	cfg_addr.dev = device;
	cfg_addr.bus = busnum;
	cfg_addr.type = config_type;

	/*
	 * Note that we don't set the mac field in cfg_addr because the
	 * mapping is per port.
	 */
	mmio_addr = trio_context->mmio_base_pio_cfg[controller->mac] +
		cfg_addr.word;

valid_device:
	switch (size) {
	case 4:
		__gxio_mmio_write32(mmio_addr, val_32);
		TRACE_CFG_WR(size, val_32, busnum, device, function, offset);
		break;
	case 2:
		__gxio_mmio_write16(mmio_addr, val_16);
		TRACE_CFG_WR(size, val_16, busnum, device, function, offset);
		break;
	case 1:
		__gxio_mmio_write8(mmio_addr, val_8);
		TRACE_CFG_WR(size, val_8, busnum, device, function, offset);
		break;
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

invalid_device:
	return 0;
}
static struct pci_ops tile_cfg_ops = {
	.read =		tile_cfg_read,
	.write =	tile_cfg_write,
};
/*
 * MSI support starts here.
 */
static unsigned int
tilegx_msi_startup(struct irq_data *d)
{
	if (d->msi_desc)
		unmask_msi_irq(d);

	return 0;
}

static void
tilegx_msi_ack(struct irq_data *d)
{
	__insn_mtspr(SPR_IPI_EVENT_RESET_K, 1UL << d->irq);
}

static void
tilegx_msi_mask(struct irq_data *d)
{
	mask_msi_irq(d);
	__insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq);
}

static void
tilegx_msi_unmask(struct irq_data *d)
{
	__insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq);
	unmask_msi_irq(d);
}

static struct irq_chip tilegx_msi_chip = {
	.name		= "tilegx_msi",
	.irq_startup	= tilegx_msi_startup,
	.irq_ack	= tilegx_msi_ack,
	.irq_mask	= tilegx_msi_mask,
	.irq_unmask	= tilegx_msi_unmask,

	/* TBD: support set_affinity. */
};
int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
	struct pci_controller *controller;
	gxio_trio_context_t *trio_context;
	struct msi_msg msg;
	int default_irq;
	uint64_t mem_map_base;
	uint64_t mem_map_limit;
	u64 msi_addr;
	int mem_map;
	int cpu;
	int irq;
	int ret;

	irq = create_irq();
	if (irq < 0)
		return irq;

	/*
	 * Since we use a 64-bit Mem-Map to accept the MSI write, we fail
	 * devices that are not capable of generating a 64-bit message address.
	 * These devices will fall back to using the legacy interrupts.
	 * Most PCIe endpoint devices do support 64-bit message addressing.
	 */
	if (desc->msi_attrib.is_64 == 0) {
		dev_printk(KERN_INFO, &pdev->dev,
			"64-bit MSI message address not supported, falling back to legacy interrupts.\n");

		ret = -ENOMEM;
		goto is_64_failure;
	}

	default_irq = desc->msi_attrib.default_irq;
	controller = irq_get_handler_data(default_irq);

	BUG_ON(!controller);

	trio_context = controller->trio;

	/*
	 * Allocate the Mem-Map that will accept the MSI write and
	 * trigger the TILE-side interrupts.
	 */
	mem_map = gxio_trio_alloc_memory_maps(trio_context, 1, 0, 0);
	if (mem_map < 0) {
		dev_printk(KERN_INFO, &pdev->dev,
			"%s Mem-Map alloc failure. Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n",
			desc->msi_attrib.is_msix ? "MSI-X" : "MSI");

		ret = -ENOMEM;
		goto msi_mem_map_alloc_failure;
	}

	/* We try to distribute different IRQs to different tiles. */
	cpu = tile_irq_cpu(irq);

	/*
	 * Now call up to the HV to configure the Mem-Map interrupt and
	 * set up the IPI binding.
	 */
	mem_map_base = MEM_MAP_INTR_REGIONS_BASE +
		mem_map * MEM_MAP_INTR_REGION_SIZE;
	mem_map_limit = mem_map_base + MEM_MAP_INTR_REGION_SIZE - 1;

	ret = gxio_trio_config_msi_intr(trio_context, cpu_x(cpu), cpu_y(cpu),
					KERNEL_PL, irq, controller->mac,
					mem_map, mem_map_base, mem_map_limit,
					trio_context->asid);
	if (ret < 0) {
		dev_printk(KERN_INFO, &pdev->dev, "HV MSI config failed.\n");

		goto hv_msi_config_failure;
	}

	irq_set_msi_desc(irq, desc);

	msi_addr = mem_map_base + TRIO_MAP_MEM_REG_INT3 - TRIO_MAP_MEM_REG_INT0;

	msg.address_hi = msi_addr >> 32;
	msg.address_lo = msi_addr & 0xffffffff;

	write_msi_msg(irq, &msg);
	irq_set_chip_and_handler(irq, &tilegx_msi_chip, handle_level_irq);
	irq_set_handler_data(irq, controller);

	return 0;

hv_msi_config_failure:
	/* Free mem-map */
msi_mem_map_alloc_failure:
is_64_failure:
	destroy_irq(irq);
	return ret;
}

void arch_teardown_msi_irq(unsigned int irq)
{
	destroy_irq(irq);
}