/*
 * pci.c - Low-Level PCI Access in IA-64
 *
 * Derived from bios32.c of i386 tree.
 *
 * (c) Copyright 2002, 2005 Hewlett-Packard Development Company, L.P.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Bjorn Helgaas <bjorn.helgaas@hp.com>
 * Copyright (C) 2004 Silicon Graphics, Inc.
 *
 * Note: Above list of copyright holders is incomplete...
 */
#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/export.h>

#include <asm/machvec.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/sal.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
/*
 * Low-level SAL-based PCI configuration access functions. Note that SAL
 * calls are already serialized (via sal_lock), so we don't need another
 * synchronization mechanism here.
 */
#define PCI_SAL_ADDRESS(seg, bus, devfn, reg)		\
	(((u64) seg << 24) | (bus << 16) | (devfn << 8) | (reg))
/* SAL 3.2 adds support for extended config space. */
#define PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg)	\
	(((u64) seg << 28) | (bus << 20) | (devfn << 12) | (reg))
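/*
 * Worked example (illustrative, not part of the original file): for
 * seg 0, bus 2, devfn 0x18, reg 0x04, PCI_SAL_ADDRESS() yields
 * (2 << 16) | (0x18 << 8) | 0x04 = 0x021804.  The extended form widens
 * each field by 4 bits so that reg can cover the full 4096-byte PCIe
 * config space: PCI_SAL_EXT_ADDRESS(0, 2, 0x18, 0x104) =
 * (2 << 20) | (0x18 << 12) | 0x104 = 0x218104.
 */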
int raw_pci_read(unsigned int seg, unsigned int bus, unsigned int devfn,
	      int reg, int len, u32 *value)
{
	u64 addr, data = 0;
	int mode, result;

	if (!value || (seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
		return -EINVAL;

	if ((seg | reg) <= 255) {
		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
		mode = 0;
	} else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
		mode = 1;
	} else {
		return -EINVAL;
	}

	result = ia64_sal_pci_config_read(addr, mode, len, &data);
	if (result != 0)
		return -EINVAL;

	*value = (u32) data;
	return 0;
}
int raw_pci_write(unsigned int seg, unsigned int bus, unsigned int devfn,
	       int reg, int len, u32 value)
{
	u64 addr;
	int mode, result;

	if ((seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
		return -EINVAL;

	if ((seg | reg) <= 255) {
		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
		mode = 0;
	} else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
		mode = 1;
	} else {
		return -EINVAL;
	}

	result = ia64_sal_pci_config_write(addr, mode, len, value);
	if (result != 0)
		return -EINVAL;
	return 0;
}
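/*
 * Illustrative sketch (not in the original source): reading the 32-bit
 * vendor/device ID of device 0, function 0 on segment 0, bus 0 through
 * the raw accessors above.  The helper name is hypothetical.
 */
#if 0
static void example_raw_config_read(void)
{
	u32 id;

	/* len = 4 reads a full dword; 0 means success */
	if (raw_pci_read(0, 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID, 4, &id) == 0)
		printk(KERN_INFO "0000:00:00.0 ID: %08x\n", id);
}
#endif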
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
							int size, u32 *value)
{
	return raw_pci_read(pci_domain_nr(bus), bus->number,
				 devfn, where, size, value);
}
static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
							int size, u32 value)
{
	return raw_pci_write(pci_domain_nr(bus), bus->number,
				  devfn, where, size, value);
}
struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};
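/*
 * Illustrative sketch (not in the original source): the PCI core reaches
 * these ops through the generic config accessors, so a driver-level read
 * like the one below ends up in pci_read() -> raw_pci_read() -> SAL.
 */
#if 0
static void example_ops_dispatch(struct pci_bus *bus)
{
	u32 class_rev;

	pci_bus_read_config_dword(bus, PCI_DEVFN(0, 0), PCI_CLASS_REVISION,
				  &class_rev);
}
#endif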
/* Called by ACPI when it finds a new root bus.  */
static struct pci_controller *alloc_pci_controller(int seg)
{
	struct pci_controller *controller;

	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		return NULL;

	controller->segment = seg;
	controller->node = -1;
	return controller;
}
struct pci_root_info {
	struct acpi_device *bridge;
	struct pci_controller *controller;
	struct list_head resources;
	char *name;
};
static unsigned int
new_space (u64 phys_base, int sparse)
{
	u64 mmio_base;
	int i;

	if (phys_base == 0)
		return 0;	/* legacy I/O port space */

	mmio_base = (u64) ioremap(phys_base, 0);
	for (i = 0; i < num_io_spaces; i++)
		if (io_space[i].mmio_base == mmio_base &&
		    io_space[i].sparse == sparse)
			return i;

	if (num_io_spaces == MAX_IO_SPACES) {
		printk(KERN_ERR "PCI: Too many IO port spaces "
			"(MAX_IO_SPACES=%lu)\n", MAX_IO_SPACES);
		return ~0;
	}

	i = num_io_spaces++;
	io_space[i].mmio_base = mmio_base;
	io_space[i].sparse = sparse;

	return i;
}
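/*
 * Illustrative note (not in the original source): assuming the usual ia64
 * definitions IO_SPACE_BITS = 24 and IO_SPACE_BASE(space) =
 * ((space) << IO_SPACE_BITS), each space returned above owns a 16 MB
 * window of Linux port numbers; e.g. space 1 covers ports
 * 0x1000000-0x1ffffff, so port 0x100 in that space appears to drivers
 * as 0x1000100.
 */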
static u64 add_io_space(struct pci_root_info *info,
			struct acpi_resource_address64 *addr)
{
	struct resource *resource;
	char *name;
	unsigned long base, min, max, base_port;
	unsigned int sparse = 0, space_nr, len;

	resource = kzalloc(sizeof(*resource), GFP_KERNEL);
	if (!resource) {
		printk(KERN_ERR "PCI: No memory for %s I/O port space\n",
			info->name);
		goto out;
	}
	len = strlen(info->name) + 32;
	name = kzalloc(len, GFP_KERNEL);
	if (!name) {
		printk(KERN_ERR "PCI: No memory for %s I/O port space name\n",
			info->name);
		goto free_resource;
	}
	min = addr->minimum;
	max = min + addr->address_length - 1;
	if (addr->info.io.translation_type == ACPI_SPARSE_TRANSLATION)
		sparse = 1;

	space_nr = new_space(addr->translation_offset, sparse);
	if (space_nr == ~0)
		goto free_name;
	base = __pa(io_space[space_nr].mmio_base);
	base_port = IO_SPACE_BASE(space_nr);
	snprintf(name, len, "%s I/O Ports %08lx-%08lx", info->name,
		base_port + min, base_port + max);
	/*
	 * The SDM guarantees the legacy 0-64K space is sparse, but if the
	 * mapping is done by the processor (not the bridge), ACPI may not
	 * mark it as sparse.
	 */
	if (space_nr == 0)
		sparse = 1;
	resource->name  = name;
	resource->flags = IORESOURCE_MEM;
	resource->start = base + (sparse ? IO_SPACE_SPARSE_ENCODING(min) : min);
	resource->end   = base + (sparse ? IO_SPACE_SPARSE_ENCODING(max) : max);
	insert_resource(&iomem_resource, resource);

	return base_port;

free_name:
	kfree(name);
free_resource:
	kfree(resource);
out:
	return ~0;
}
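/*
 * Illustrative note (not in the original source): assuming the usual ia64
 * encoding IO_SPACE_SPARSE_ENCODING(p) = (((p) >> 2) << 12) | ((p) & 0xfff),
 * a sparse space spreads each 4-byte port group onto its own 4 KB page,
 * so port 0x1f0 lands at MMIO offset ((0x1f0 >> 2) << 12) | 0x1f0 =
 * 0x7c1f0 within the window registered above.
 */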
static acpi_status resource_to_window(struct acpi_resource *resource,
				      struct acpi_resource_address64 *addr)
{
	acpi_status status;

	/*
	 * We're only interested in _CRS descriptors that are
	 *	- address space descriptors for memory or I/O space
	 *	- non-zero size
	 *	- producers, i.e., the address space is routed downstream,
	 *	  not consumed by the bridge itself
	 */
	status = acpi_resource_to_address64(resource, addr);
	if (ACPI_SUCCESS(status) &&
	    (addr->resource_type == ACPI_MEMORY_RANGE ||
	     addr->resource_type == ACPI_IO_RANGE) &&
	    addr->address_length &&
	    addr->producer_consumer == ACPI_PRODUCER)
		return AE_OK;

	return AE_ERROR;
}
static acpi_status count_window(struct acpi_resource *resource, void *data)
{
	unsigned int *windows = (unsigned int *) data;
	struct acpi_resource_address64 addr;
	acpi_status status;

	status = resource_to_window(resource, &addr);
	if (ACPI_SUCCESS(status))
		(*windows)++;

	return AE_OK;
}
static acpi_status add_window(struct acpi_resource *res, void *data)
{
	struct pci_root_info *info = data;
	struct pci_window *window;
	struct acpi_resource_address64 addr;
	acpi_status status;
	unsigned long flags, offset = 0;
	struct resource *root;

	/* Return AE_OK for non-window resources to keep scanning for more */
	status = resource_to_window(res, &addr);
	if (!ACPI_SUCCESS(status))
		return AE_OK;
	if (addr.resource_type == ACPI_MEMORY_RANGE) {
		flags = IORESOURCE_MEM;
		root = &iomem_resource;
		offset = addr.translation_offset;
	} else if (addr.resource_type == ACPI_IO_RANGE) {
		flags = IORESOURCE_IO;
		root = &ioport_resource;
		offset = add_io_space(info, &addr);
		if (offset == ~0)
			return AE_OK;
	} else
		return AE_OK;
	window = &info->controller->window[info->controller->windows++];
	window->resource.name = info->name;
	window->resource.flags = flags;
	window->resource.start = addr.minimum + offset;
	window->resource.end = window->resource.start + addr.address_length - 1;
	window->offset = offset;
	if (insert_resource(root, &window->resource)) {
		dev_err(&info->bridge->dev,
			"can't allocate host bridge window %pR\n",
			&window->resource);
	} else {
		if (offset)
			dev_info(&info->bridge->dev, "host bridge window %pR "
				 "(PCI address [%#llx-%#llx])\n",
				 &window->resource,
				 window->resource.start - offset,
				 window->resource.end - offset);
		else
			dev_info(&info->bridge->dev,
				 "host bridge window %pR\n",
				 &window->resource);
	}
	/* HP's firmware has a hack to work around a Windows bug.
	 * Ignore these tiny memory ranges */
	if (!((window->resource.flags & IORESOURCE_MEM) &&
	      (window->resource.end - window->resource.start < 16)))
		pci_add_resource_offset(&info->resources, &window->resource,
					window->offset);

	return AE_OK;
}
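/*
 * Illustrative note (not in the original source): window->offset is the
 * bus-to-CPU translation.  For example, a bridge whose _CRS maps PCI
 * addresses 0x0-0xfffffff to CPU addresses 0x80000000-0x8fffffff has
 * offset 0x80000000; pci_add_resource_offset() records it so the core
 * can convert between the two views when assigning BARs.
 */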
struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
{
	struct acpi_device *device = root->device;
	int domain = root->segment;
	int bus = root->secondary.start;
	struct pci_controller *controller;
	unsigned int windows = 0;
	struct pci_root_info info;
	struct pci_bus *pbus;
	char *name;
	int pxm;
	controller = alloc_pci_controller(domain);
	if (!controller)
		goto out1;

	controller->acpi_handle = device->handle;

	pxm = acpi_get_pxm(controller->acpi_handle);
#ifdef CONFIG_NUMA
	if (pxm >= 0)
		controller->node = pxm_to_node(pxm);
#endif
	INIT_LIST_HEAD(&info.resources);
	/* insert busn resource at first */
	pci_add_resource(&info.resources, &root->secondary);
	acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_window,
			&windows);
	if (windows) {
		controller->window =
			kzalloc_node(sizeof(*controller->window) * windows,
				     GFP_KERNEL, controller->node);
		if (!controller->window)
			goto out2;

		name = kmalloc(16, GFP_KERNEL);
		if (!name)
			goto out3;

		sprintf(name, "PCI Bus %04x:%02x", domain, bus);
		info.bridge = device;
		info.controller = controller;
		info.name = name;
		acpi_walk_resources(device->handle, METHOD_NAME__CRS,
			add_window, &info);
	}
	/*
	 * See arch/x86/pci/acpi.c.
	 * The desired pci bus might already be scanned in a quirk. We
	 * should handle the case here, but it appears that IA64 doesn't
	 * have such a quirk, so we just ignore the case for now.
	 */
	pbus = pci_create_root_bus(NULL, bus, &pci_root_ops, controller,
				   &info.resources);
	if (!pbus) {
		pci_free_resource_list(&info.resources);
		return NULL;
	}

	pci_scan_child_bus(pbus);
	return pbus;

out3:
	kfree(controller->window);
out2:
	kfree(controller);
out1:
	return NULL;
}
int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	struct pci_controller *controller = bridge->bus->sysdata;

	ACPI_HANDLE_SET(&bridge->dev, controller->acpi_handle);
	return 0;
}
static int is_valid_resource(struct pci_dev *dev, int idx)
{
	unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM;
	struct resource *devr = &dev->resource[idx], *busr;

	if (!dev->bus)
		return 0;

	pci_bus_for_each_resource(dev->bus, busr, i) {
		if (!busr || ((busr->flags ^ devr->flags) & type_mask))
			continue;
		if ((devr->start) && (devr->start >= busr->start) &&
				(devr->end <= busr->end))
			return 1;
	}
	return 0;
}
static void pcibios_fixup_resources(struct pci_dev *dev, int start, int limit)
{
	int i;

	for (i = start; i < limit; i++) {
		if (!dev->resource[i].flags)
			continue;
		if ((is_valid_resource(dev, i)))
			pci_claim_resource(dev, i);
	}
}
void pcibios_fixup_device_resources(struct pci_dev *dev)
{
	pcibios_fixup_resources(dev, 0, PCI_BRIDGE_RESOURCES);
}
EXPORT_SYMBOL_GPL(pcibios_fixup_device_resources);
static void pcibios_fixup_bridge_resources(struct pci_dev *dev)
{
	pcibios_fixup_resources(dev, PCI_BRIDGE_RESOURCES, PCI_NUM_RESOURCES);
}
/*
 *  Called after each bus is probed, but before its children are examined.
 */
void pcibios_fixup_bus(struct pci_bus *b)
{
	struct pci_dev *dev;

	if (b->self) {
		pci_read_bridge_bases(b);
		pcibios_fixup_bridge_resources(b->self);
	}
	list_for_each_entry(dev, &b->devices, bus_list)
		pcibios_fixup_device_resources(dev);
	platform_pci_fixup_bus(b);
}
void pcibios_add_bus(struct pci_bus *bus)
{
	acpi_pci_add_bus(bus);
}
void pcibios_remove_bus(struct pci_bus *bus)
{
	acpi_pci_remove_bus(bus);
}
void pcibios_set_master (struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}
int
pcibios_enable_device (struct pci_dev *dev, int mask)
{
	int ret;

	ret = pci_enable_resources(dev, mask);
	if (ret < 0)
		return ret;

	if (!dev->msi_enabled)
		return acpi_pci_irq_enable(dev);
	return 0;
}
void
pcibios_disable_device (struct pci_dev *dev)
{
	BUG_ON(atomic_read(&dev->enable_cnt));
	if (!dev->msi_enabled)
		acpi_pci_irq_disable(dev);
}
resource_size_t
pcibios_align_resource (void *data, const struct resource *res,
		        resource_size_t size, resource_size_t align)
{
	return res->start;
}
int
pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
		     enum pci_mmap_state mmap_state, int write_combine)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	pgprot_t prot;

	/*
	 * I/O space cannot be accessed via normal processor loads and
	 * stores on this platform.
	 */
	if (mmap_state == pci_mmap_io)
		/*
		 * XXX we could relax this for I/O spaces for which ACPI
		 * indicates that the space is 1-to-1 mapped.  But at the
		 * moment, we don't support multiple PCI address spaces and
		 * the legacy I/O space is not 1-to-1 mapped, so this is moot.
		 */
		return -EINVAL;
	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
				    vma->vm_page_prot);

	/*
	 * If the user requested WC, the kernel uses UC or WC for this region,
	 * and the chipset supports WC, we can use WC. Otherwise, we have to
	 * use the same attribute the kernel uses.
	 */
	if (write_combine &&
	    ((pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_UC ||
	     (pgprot_val(prot) & _PAGE_MA_MASK) == _PAGE_MA_WC) &&
	    efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else
		vma->vm_page_prot = prot;

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			     vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
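/*
 * Illustrative sketch (not in the original source): this path is reached
 * when user space mmaps a sysfs BAR resource file, e.g.:
 *
 *	int fd = open("/sys/bus/pci/devices/0000:00:00.0/resource0", O_RDWR);
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * pci_mmap_page_range() then validates the range and picks a UC or WC
 * mapping attribute before remapping the pages.
 */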
/**
 * ia64_pci_get_legacy_mem - generic legacy mem routine
 * @bus: bus to get legacy memory base address for
 *
 * Find the base of legacy memory for @bus.  This is typically the first
 * megabyte of bus address space for @bus or is simply 0 on platforms whose
 * chipsets support legacy I/O and memory routing.  Returns the base address
 * or an error pointer if an error occurred.
 *
 * This is the ia64 generic version of this routine.  Other platforms
 * are free to override it with a machine vector.
 */
char *ia64_pci_get_legacy_mem(struct pci_bus *bus)
{
	return (char *)__IA64_UNCACHED_OFFSET;
}
/**
 * pci_mmap_legacy_page_range - map legacy memory space to userland
 * @bus: bus whose legacy space we're mapping
 * @vma: vma passed in by mmap
 * @mmap_state: memory or I/O space to map
 *
 * Map legacy memory space for this device back to userspace using a machine
 * vector to get the base address.
 */
int
pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma,
			   enum pci_mmap_state mmap_state)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	pgprot_t prot;
	char *addr;
	/* We only support mmap'ing of legacy memory space */
	if (mmap_state != pci_mmap_mem)
		return -ENOSYS;

	/*
	 * Avoid attribute aliasing.  See Documentation/ia64/aliasing.txt
	 * for more details.
	 */
	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;
	prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
				    vma->vm_page_prot);

	addr = pci_get_legacy_mem(bus);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
	vma->vm_page_prot = prot;

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    size, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}
/**
 * ia64_pci_legacy_read - read from legacy I/O space
 * @bus: bus to read
 * @port: legacy port value
 * @val: caller allocated storage for returned value
 * @size: number of bytes to read
 *
 * Simply reads @size bytes from @port and puts the result in @val.
 *
 * Again, this (and the write routine) are generic versions that can be
 * overridden by the platform.  This is necessary on platforms that don't
 * support legacy I/O routing or that hard fail on legacy I/O timeouts.
 */
int ia64_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
	int ret = size;

	switch (size) {
	case 1:
		*val = inb(port);
		break;
	case 2:
		*val = inw(port);
		break;
	case 4:
		*val = inl(port);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
/**
 * ia64_pci_legacy_write - perform a legacy I/O write
 * @bus: bus pci device is on
 * @port: port to write
 * @val: value to write
 * @size: number of bytes to write from @val
 *
 * Simply writes @size bytes of @val to @port.
 */
int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
	int ret = size;

	switch (size) {
	case 1:
		outb(val, port);
		break;
	case 2:
		outw(val, port);
		break;
	case 4:
		outl(val, port);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
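/*
 * Illustrative sketch (not in the original source): reading the VGA
 * miscellaneous output register (port 0x3cc) through the legacy read
 * routine above.  The caller is hypothetical; note the routines return
 * @size on success, not 0.
 */
#if 0
static void example_legacy_read(struct pci_bus *bus)
{
	u32 misc;

	if (ia64_pci_legacy_read(bus, 0x3cc, &misc, 1) == 1)
		printk(KERN_INFO "VGA misc output: %02x\n", misc);
}
#endif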
/**
 * set_pci_dfl_cacheline_size - determine cacheline size for PCI devices
 *
 * We want to use the line-size of the outer-most cache.  We assume
 * that this line-size is the same for all CPUs.
 *
 * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info().
 */
static void __init set_pci_dfl_cacheline_size(void)
{
	unsigned long levels, unique_caches;
	long status;
	pal_cache_config_info_t cci;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed "
			"(status=%ld)\n", __func__, status);
		return;
	}

	status = ia64_pal_cache_config_info(levels - 1,
				/* cache_type (data_or_unified)= */ 2, &cci);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_config_info() failed "
			"(status=%ld)\n", __func__, status);
		return;
	}
	pci_dfl_cache_line_size = (1 << cci.pcci_line_size) / 4;
}
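/*
 * Worked example (illustrative, not part of the original file): PAL
 * reports pcci_line_size as log2 of the line size in bytes, so a
 * 128-byte outermost cache line gives pcci_line_size = 7 and
 * pci_dfl_cache_line_size = (1 << 7) / 4 = 32, i.e. 32 dwords, the unit
 * the PCI_CACHE_LINE_SIZE config register expects.
 */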
u64 ia64_dma_get_required_mask(struct device *dev)
{
	u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
	u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
	u64 mask;

	if (!high_totalram) {
		/* convert to mask just covering totalram */
		low_totalram = (1 << (fls(low_totalram) - 1));
		low_totalram += low_totalram - 1;
		mask = low_totalram;
	} else {
		high_totalram = (1 << (fls(high_totalram) - 1));
		high_totalram += high_totalram - 1;
		mask = (((u64)high_totalram) << 32) + 0xffffffff;
	}
	return mask;
}
EXPORT_SYMBOL_GPL(ia64_dma_get_required_mask);
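/*
 * Worked example (illustrative, not part of the original file): with 16 KB
 * pages (PAGE_SHIFT = 14) and 2 GB of RAM, max_pfn = 0x20000, so
 * high_totalram = 0 and low_totalram = 0x7fffc000; rounding up to a
 * power-of-two mask yields 0x7fffffff, i.e. a device needs at least a
 * 31-bit DMA mask to reach all of memory.
 */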
u64 dma_get_required_mask(struct device *dev)
{
	return platform_dma_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);
static int __init pcibios_init(void)
{
	set_pci_dfl_cacheline_size();
	return 0;
}

subsys_initcall(pcibios_init);