/*
 * pci.c - Low-Level PCI Access in IA-64
 *
 * Derived from bios32.c of i386 tree.
 *
 * (c) Copyright 2002, 2005 Hewlett-Packard Development Company, L.P.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Bjorn Helgaas <bjorn.helgaas@hp.com>
 * Copyright (C) 2004 Silicon Graphics, Inc.
 *
 * Note: Above list of copyright holders is incomplete...
 */
#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/export.h>

#include <asm/machvec.h>
#include <asm/hw_irq.h>
/*
 * Low-level SAL-based PCI configuration access functions. Note that SAL
 * calls are already serialized (via sal_lock), so we don't need another
 * synchronization mechanism here.
 */
#define PCI_SAL_ADDRESS(seg, bus, devfn, reg)		\
	(((u64) seg << 24) | (bus << 16) | (devfn << 8) | (reg))

/* SAL 3.2 adds support for extended config space. */

#define PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg)	\
	(((u64) seg << 28) | (bus << 20) | (devfn << 12) | (reg))
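/*
 * Example (illustrative): seg 0, bus 2, devfn 0x18, reg 0x40 encodes to
 * 0x00021840 in the compact format above, while the extended format widens
 * the register field to 12 bits and yields 0x00218040 for the same target.
 */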
int raw_pci_read(unsigned int seg, unsigned int bus, unsigned int devfn,
		 int reg, int len, u32 *value)

	if (!value || (seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))

	if ((seg | reg) <= 255) {
		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
	} else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);

	result = ia64_sal_pci_config_read(addr, mode, len, &data);
int raw_pci_write(unsigned int seg, unsigned int bus, unsigned int devfn,
		  int reg, int len, u32 value)

	if ((seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))

	if ((seg | reg) <= 255) {
		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
	} else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);

	result = ia64_sal_pci_config_write(addr, mode, len, value);
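/*
 * Usage sketch (illustrative, not from the original file): reading the
 * 32-bit vendor/device ID word of device 0000:00:01.0 through the raw
 * interface would look like
 *
 *	u32 id;
 *	raw_pci_read(0, 0, PCI_DEVFN(1, 0), PCI_VENDOR_ID, 4, &id);
 */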
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *value)
{
	return raw_pci_read(pci_domain_nr(bus), bus->number,
			    devfn, where, size, value);
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 value)
{
	return raw_pci_write(pci_domain_nr(bus), bus->number,
			     devfn, where, size, value);
}
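/*
 * pci_read()/pci_write() adapt the pci_bus-based config accesses issued by
 * the PCI core to the raw segment/bus/devfn interface above.
 */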
struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};
/* Called by ACPI when it finds a new root bus.  */

static struct pci_controller *alloc_pci_controller(int seg)

	struct pci_controller *controller;

	controller = kzalloc(sizeof(*controller), GFP_KERNEL);

	controller->segment = seg;
struct pci_root_info {
	struct acpi_device *bridge;
	struct pci_controller *controller;
	struct list_head resources;
	struct resource *res;
	resource_size_t *res_offset;
	unsigned int res_num;
	struct list_head io_resources;
	char *name;
};
new_space (u64 phys_base, int sparse)

		return 0; /* legacy I/O port space */

	mmio_base = (u64) ioremap(phys_base, 0);
	for (i = 0; i < num_io_spaces; i++)
		if (io_space[i].mmio_base == mmio_base &&
		    io_space[i].sparse == sparse)

	if (num_io_spaces == MAX_IO_SPACES) {
		pr_err("PCI: Too many IO port spaces (MAX_IO_SPACES=%lu)\n",
		       MAX_IO_SPACES);

	io_space[i].mmio_base = mmio_base;
	io_space[i].sparse = sparse;
static u64 add_io_space(struct pci_root_info *info,
			struct acpi_resource_address64 *addr)

	struct iospace_resource *iospace;
	struct resource *resource;
	unsigned long base, min, max, base_port;
	unsigned int sparse = 0, space_nr, len;

	len = strlen(info->name) + 32;
	iospace = kzalloc(sizeof(*iospace) + len, GFP_KERNEL);
		dev_err(&info->bridge->dev,
			"PCI: No memory for %s I/O port space\n",

	name = (char *)(iospace + 1);

	max = min + addr->address_length - 1;
	if (addr->info.io.translation_type == ACPI_SPARSE_TRANSLATION)

	space_nr = new_space(addr->translation_offset, sparse);

	base = __pa(io_space[space_nr].mmio_base);
	base_port = IO_SPACE_BASE(space_nr);
	snprintf(name, len, "%s I/O Ports %08lx-%08lx", info->name,
		 base_port + min, base_port + max);
	/*
	 * The SDM guarantees the legacy 0-64K space is sparse, but if the
	 * mapping is done by the processor (not the bridge), ACPI may not
	 * mark it as sparse.
	 */
	resource = &iospace->res;
	resource->name = name;
	resource->flags = IORESOURCE_MEM;
	resource->start = base + (sparse ? IO_SPACE_SPARSE_ENCODING(min) : min);
	resource->end = base + (sparse ? IO_SPACE_SPARSE_ENCODING(max) : max);
	if (insert_resource(&iomem_resource, resource)) {
		dev_err(&info->bridge->dev,
			"can't allocate host bridge io space resource %pR\n",
			resource);

	list_add_tail(&iospace->list, &info->io_resources);
static acpi_status resource_to_window(struct acpi_resource *resource,
				      struct acpi_resource_address64 *addr)
	/*
	 * We're only interested in _CRS descriptors that are
	 *	- address space descriptors for memory or I/O space
	 *	- non-zero size
	 *	- producers, i.e., the address space is routed downstream,
	 *	  not consumed by the bridge itself
	 */
	status = acpi_resource_to_address64(resource, addr);
	if (ACPI_SUCCESS(status) &&
	    (addr->resource_type == ACPI_MEMORY_RANGE ||
	     addr->resource_type == ACPI_IO_RANGE) &&
	    addr->address_length &&
	    addr->producer_consumer == ACPI_PRODUCER)

static acpi_status count_window(struct acpi_resource *resource, void *data)

	unsigned int *windows = (unsigned int *) data;
	struct acpi_resource_address64 addr;

	status = resource_to_window(resource, &addr);
	if (ACPI_SUCCESS(status))
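/*
 * count_window() and add_window() are the two passes over the _CRS list:
 * the first pass only counts matching windows so that the arrays in
 * struct pci_root_info can be sized, the second fills them in and
 * registers the resources.
 */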
static acpi_status add_window(struct acpi_resource *res, void *data)

	struct pci_root_info *info = data;
	struct resource *resource;
	struct acpi_resource_address64 addr;
	unsigned long flags, offset = 0;
	struct resource *root;

	/* Return AE_OK for non-window resources to keep scanning for more */
	status = resource_to_window(res, &addr);
	if (!ACPI_SUCCESS(status))

	if (addr.resource_type == ACPI_MEMORY_RANGE) {
		flags = IORESOURCE_MEM;
		root = &iomem_resource;
		offset = addr.translation_offset;
	} else if (addr.resource_type == ACPI_IO_RANGE) {
		flags = IORESOURCE_IO;
		root = &ioport_resource;
		offset = add_io_space(info, &addr);

	resource = &info->res[info->res_num];
	resource->name = info->name;
	resource->flags = flags;
	resource->start = addr.minimum + offset;
	resource->end = resource->start + addr.address_length - 1;
	info->res_offset[info->res_num] = offset;
	if (insert_resource(root, resource)) {
		dev_err(&info->bridge->dev,
			"can't allocate host bridge window %pR\n",
			resource);

		dev_info(&info->bridge->dev, "host bridge window %pR "
			 "(PCI address [%#llx-%#llx])\n",
			 resource,
			 resource->start - offset,
			 resource->end - offset);

		dev_info(&info->bridge->dev,
			 "host bridge window %pR\n", resource);
	/* HP's firmware has a hack to work around a Windows bug.
	 * Ignore these tiny memory ranges */
	if (!((resource->flags & IORESOURCE_MEM) &&
	      (resource->end - resource->start < 16)))
		pci_add_resource_offset(&info->resources, resource,
					info->res_offset[info->res_num]);
static void free_pci_root_info_res(struct pci_root_info *info)

	struct iospace_resource *iospace, *tmp;

	list_for_each_entry_safe(iospace, tmp, &info->io_resources, list)

	kfree(info->res_offset);
	info->res_offset = NULL;

	kfree(info->controller);
	info->controller = NULL;
static void __release_pci_root_info(struct pci_root_info *info)

	struct resource *res;
	struct iospace_resource *iospace;

	list_for_each_entry(iospace, &info->io_resources, list)
		release_resource(&iospace->res);

	for (i = 0; i < info->res_num; i++) {

		if (!(res->flags & (IORESOURCE_MEM | IORESOURCE_IO)))

		release_resource(res);

	free_pci_root_info_res(info);

static void release_pci_root_info(struct pci_host_bridge *bridge)

	struct pci_root_info *info = bridge->release_data;

	__release_pci_root_info(info);
probe_pci_root_info(struct pci_root_info *info, struct acpi_device *device,
		    int busnum, int domain)

	name = kmalloc(16, GFP_KERNEL);

	sprintf(name, "PCI Bus %04x:%02x", domain, busnum);
	info->bridge = device;

	acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_window,
			    &info->res_num);

	info->res = kzalloc_node(sizeof(*info->res) * info->res_num,
				 GFP_KERNEL, info->controller->node);

	info->res_offset = kzalloc_node(sizeof(*info->res_offset) * info->res_num,
					GFP_KERNEL, info->controller->node);
	if (!info->res_offset) {

	acpi_walk_resources(device->handle, METHOD_NAME__CRS,
			    add_window, info);
struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)

	struct acpi_device *device = root->device;
	int domain = root->segment;
	int bus = root->secondary.start;
	struct pci_controller *controller;
	struct pci_root_info *info = NULL;
	int busnum = root->secondary.start;
	struct pci_bus *pbus;

	controller = alloc_pci_controller(domain);

	controller->companion = device;
	controller->node = acpi_get_node(device->handle);

	info = kzalloc(sizeof(*info), GFP_KERNEL);
		dev_err(&device->dev,
			"pci_bus %04x:%02x: ignored (out of memory)\n",

	info->controller = controller;
	INIT_LIST_HEAD(&info->io_resources);
	INIT_LIST_HEAD(&info->resources);

	ret = probe_pci_root_info(info, device, busnum, domain);

		kfree(info->controller);
	/* Insert the bus number resource first */
	pci_add_resource(&info->resources, &root->secondary);

	/*
	 * See arch/x86/pci/acpi.c.
	 * The desired pci bus might already be scanned in a quirk. We
	 * should handle the case here, but it appears that IA64 has no
	 * such quirk, so we just ignore the case for now.
	 */
	pbus = pci_create_root_bus(NULL, bus, &pci_root_ops, controller,
				   &info->resources);

		pci_free_resource_list(&info->resources);
		__release_pci_root_info(info);

	pci_set_host_bridge_release(to_pci_host_bridge(pbus->bridge),
				    release_pci_root_info, info);
	pci_scan_child_bus(pbus);
int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)

	struct pci_controller *controller = bridge->bus->sysdata;

	ACPI_COMPANION_SET(&bridge->dev, controller->companion);
static int is_valid_resource(struct pci_dev *dev, int idx)

	unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM;
	struct resource *devr = &dev->resource[idx], *busr;

	pci_bus_for_each_resource(dev->bus, busr, i) {
		if (!busr || ((busr->flags ^ devr->flags) & type_mask))

		if ((devr->start) && (devr->start >= busr->start) &&
		    (devr->end <= busr->end))
static void pcibios_fixup_resources(struct pci_dev *dev, int start, int limit)

	for (i = start; i < limit; i++) {
		if (!dev->resource[i].flags)

		if (is_valid_resource(dev, i))
			pci_claim_resource(dev, i);

void pcibios_fixup_device_resources(struct pci_dev *dev)
{
	pcibios_fixup_resources(dev, 0, PCI_BRIDGE_RESOURCES);
}
EXPORT_SYMBOL_GPL(pcibios_fixup_device_resources);

static void pcibios_fixup_bridge_resources(struct pci_dev *dev)
{
	pcibios_fixup_resources(dev, PCI_BRIDGE_RESOURCES, PCI_NUM_RESOURCES);
}
/*
 * Called after each bus is probed, but before its children are examined.
 */
void pcibios_fixup_bus(struct pci_bus *b)

		pci_read_bridge_bases(b);
		pcibios_fixup_bridge_resources(b->self);

	list_for_each_entry(dev, &b->devices, bus_list)
		pcibios_fixup_device_resources(dev);
	platform_pci_fixup_bus(b);
void pcibios_add_bus(struct pci_bus *bus)
{
	acpi_pci_add_bus(bus);
}

void pcibios_remove_bus(struct pci_bus *bus)
{
	acpi_pci_remove_bus(bus);
}

void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}
pcibios_enable_device (struct pci_dev *dev, int mask)

	ret = pci_enable_resources(dev, mask);

	if (!dev->msi_enabled)
		return acpi_pci_irq_enable(dev);

pcibios_disable_device (struct pci_dev *dev)

	BUG_ON(atomic_read(&dev->enable_cnt));
	if (!dev->msi_enabled)
		acpi_pci_irq_disable(dev);
pcibios_align_resource (void *data, const struct resource *res,
			resource_size_t size, resource_size_t align)
pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
		     enum pci_mmap_state mmap_state, int write_combine)

	unsigned long size = vma->vm_end - vma->vm_start;

	/*
	 * I/O space cannot be accessed via normal processor loads and
	 * stores on this platform.
	 */
	if (mmap_state == pci_mmap_io)
		/*
		 * XXX we could relax this for I/O spaces for which ACPI
		 * indicates that the space is 1-to-1 mapped. But at the
		 * moment, we don't support multiple PCI address spaces and
		 * the legacy I/O space is not 1-to-1 mapped, so this is moot.
		 */

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
	prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
				    vma->vm_page_prot);

	/*
	 * If the user requested WC, the kernel uses UC or WC for this region,
	 * and the chipset supports WC, then we can use WC. Otherwise, we have
	 * to use the same attribute the kernel uses.
	 */
	if (write_combine &&
	    ((pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_UC ||
	     (pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_WC) &&
	    efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else
		vma->vm_page_prot = prot;

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    vma->vm_end - vma->vm_start, vma->vm_page_prot))
/**
 * ia64_pci_get_legacy_mem - generic legacy mem routine
 * @bus: bus to get legacy memory base address for
 *
 * Find the base of legacy memory for @bus. This is typically the first
 * megabyte of bus address space for @bus or is simply 0 on platforms whose
 * chipsets support legacy I/O and memory routing. Returns the base address
 * or an error pointer if an error occurred.
 *
 * This is the ia64 generic version of this routine. Other platforms
 * are free to override it with a machine vector.
 */
char *ia64_pci_get_legacy_mem(struct pci_bus *bus)
{
	return (char *)__IA64_UNCACHED_OFFSET;
}
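/*
 * Note: __IA64_UNCACHED_OFFSET is the base of the kernel's identity-mapped
 * uncacheable region, so legacy bus addresses (the low megabyte) can be
 * reached simply by adding them to the returned pointer.
 */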
/**
 * pci_mmap_legacy_page_range - map legacy memory space to userland
 * @bus: bus whose legacy space we're mapping
 * @vma: vma passed in by mmap
 *
 * Map legacy memory space for this device back to userspace using a machine
 * vector to get the base address.
 */
pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma,
			   enum pci_mmap_state mmap_state)

	unsigned long size = vma->vm_end - vma->vm_start;

	/* We only support mmap'ing of legacy memory space */
	if (mmap_state != pci_mmap_mem)

	/*
	 * Avoid attribute aliasing. See Documentation/ia64/aliasing.txt
	 */
	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
	prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
				    vma->vm_page_prot);

	addr = pci_get_legacy_mem(bus);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
	vma->vm_page_prot = prot;

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    size, vma->vm_page_prot))
/**
 * ia64_pci_legacy_read - read from legacy I/O space
 * @bus: bus to read
 * @port: legacy port value
 * @val: caller allocated storage for returned value
 * @size: number of bytes to read
 *
 * Simply reads @size bytes from @port and puts the result in @val.
 *
 * Again, this (and the write routine) are generic versions that can be
 * overridden by the platform. This is necessary on platforms that don't
 * support legacy I/O routing or that hard fail on legacy I/O timeouts.
 */
int ia64_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
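{
	/*
	 * Minimal sketch of the generic behaviour described above, assuming
	 * the standard inb()/inw()/inl() port accessors (illustrative; the
	 * exact original body is not reproduced here).
	 */
	int ret = size;

	switch (size) {
	case 1:
		*val = inb(port);
		break;
	case 2:
		*val = inw(port);
		break;
	case 4:
		*val = inl(port);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}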
/**
 * ia64_pci_legacy_write - perform a legacy I/O write
 * @bus: bus to write
 * @port: port to write
 * @val: value to write
 * @size: number of bytes to write from @val
 *
 * Simply writes @size bytes of @val to @port.
 */
int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
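{
	/*
	 * Minimal sketch mirroring the read routine above, assuming the
	 * standard outb()/outw()/outl() port accessors (illustrative).
	 */
	int ret = size;

	switch (size) {
	case 1:
		outb(val, port);
		break;
	case 2:
		outw(val, port);
		break;
	case 4:
		outl(val, port);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}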
/**
 * set_pci_dfl_cacheline_size - determine cacheline size for PCI devices
 *
 * We want to use the line-size of the outer-most cache. We assume
 * that this line-size is the same for all CPUs.
 *
 * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info().
 */
static void __init set_pci_dfl_cacheline_size(void)

	unsigned long levels, unique_caches;

	pal_cache_config_info_t cci;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
		pr_err("%s: ia64_pal_cache_summary() failed (status=%ld)\n",
		       __func__, status);

	status = ia64_pal_cache_config_info(levels - 1,
				/* cache_type (data_or_unified)= */ 2, &cci);
		pr_err("%s: ia64_pal_cache_config_info() failed (status=%ld)\n",
		       __func__, status);

	pci_dfl_cache_line_size = (1 << cci.pcci_line_size) / 4;
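/*
 * Example (illustrative): pcci_line_size is the log2 of the line size in
 * bytes, so a 128-byte outermost cache line (pcci_line_size = 7) gives
 * pci_dfl_cache_line_size = 128 / 4 = 32 dwords.
 */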
u64 ia64_dma_get_required_mask(struct device *dev)

	u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
	u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));

	if (!high_totalram) {
		/* convert to mask just covering totalram */
		low_totalram = (1 << (fls(low_totalram) - 1));
		low_totalram += low_totalram - 1;

		high_totalram = (1 << (fls(high_totalram) - 1));
		high_totalram += high_totalram - 1;
		mask = (((u64)high_totalram) << 32) + 0xffffffff;

EXPORT_SYMBOL_GPL(ia64_dma_get_required_mask);
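/*
 * Example (illustrative): with 6 GiB of RAM, high_totalram reduces to 1, so
 * the computed mask is ((u64)1 << 32) + 0xffffffff = 0x1ffffffff, i.e. 33
 * address bits are needed to reach all of memory.
 */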
u64 dma_get_required_mask(struct device *dev)
{
	return platform_dma_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
static int __init pcibios_init(void)
{
	set_pci_dfl_cacheline_size();
	return 0;
}
subsys_initcall(pcibios_init);