/*
 *	drivers/pci/setup-bus.c
 *
 * Extruded from code written by
 *	Dave Rusling (david.rusling@reo.mts.dec.com)
 *	David Mosberger (davidm@cs.arizona.edu)
 *	David Miller (davem@redhat.com)
 *
 * Support routines for initializing a PCI subsystem.
 *
 * Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
 *	     PCI-PCI bridges cleanup, sorted resource allocation.
 * Feb 2002, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
 *	     Converted to allocation in 3 passes, which gives
 *	     tighter packing. Prefetchable range support.
 */
20 #include <linux/init.h>
21 #include <linux/kernel.h>
22 #include <linux/module.h>
23 #include <linux/pci.h>
24 #include <linux/errno.h>
25 #include <linux/ioport.h>
26 #include <linux/cache.h>
27 #include <linux/slab.h>
30 struct resource_list_x {
31 struct resource_list_x *next;
34 resource_size_t start;
36 resource_size_t add_size;
37 resource_size_t min_align;
/*
 * free_list() - kfree() every node hanging off @head and leave the
 * (stack-allocated) head with an empty list.  The do/while(0) wrapper
 * keeps the macro statement-safe inside if/else bodies.
 */
#define free_list(type, head) do {                      \
	struct type *list, *tmp;                        \
	for (list = (head)->next; list;) {              \
		tmp = list;                             \
		list = list->next;                      \
		kfree(tmp);                             \
	}                                               \
	(head)->next = NULL;                            \
} while (0)
/*
 * User-controlled opt-in switch: when set, resources that failed
 * assignment are retried after releasing bridge windows (see
 * pci_assign_unassigned_resources()).
 */
int pci_realloc_enable = 0;
#define pci_realloc_enabled() pci_realloc_enable

/* Called from command-line/param parsing to enable reallocation. */
void pci_realloc(void)
{
	pci_realloc_enable = 1;
}
59 * add_to_list() - add a new resource tracker to the list
60 * @head: Head of the list
61 * @dev: device corresponding to which the resource
63 * @res: The resource to be tracked
64 * @add_size: additional size to be optionally added
67 static void add_to_list(struct resource_list_x *head,
68 struct pci_dev *dev, struct resource *res,
69 resource_size_t add_size, resource_size_t min_align)
71 struct resource_list_x *list = head;
72 struct resource_list_x *ln = list->next;
73 struct resource_list_x *tmp;
75 tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
77 pr_warning("add_to_list: kmalloc() failed!\n");
84 tmp->start = res->start;
86 tmp->flags = res->flags;
87 tmp->add_size = add_size;
88 tmp->min_align = min_align;
/*
 * Record a resource whose assignment failed.  The optional-size and
 * alignment fields are meaningless on the failed list, so pass zeros.
 */
static void add_to_failed_list(struct resource_list_x *head,
				struct pci_dev *dev, struct resource *res)
{
	add_to_list(head, dev, res,
			0 /* don't care */,
			0 /* don't care */);
}
100 static void __dev_sort_resources(struct pci_dev *dev,
101 struct resource_list *head)
103 u16 class = dev->class >> 8;
105 /* Don't touch classless devices or host bridges or ioapics. */
106 if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST)
109 /* Don't touch ioapic devices already enabled by firmware */
110 if (class == PCI_CLASS_SYSTEM_PIC) {
112 pci_read_config_word(dev, PCI_COMMAND, &command);
113 if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
117 pdev_sort_resources(dev, head);
120 static inline void reset_resource(struct resource *res)
128 * adjust_resources_sorted() - satisfy any additional resource requests
130 * @add_head : head of the list tracking requests requiring additional
132 * @head : head of the list tracking requests with allocated
135 * Walk through each element of the add_head and try to procure
136 * additional resources for the element, provided the element
137 * is in the head list.
139 static void adjust_resources_sorted(struct resource_list_x *add_head,
140 struct resource_list *head)
142 struct resource *res;
143 struct resource_list_x *list, *tmp, *prev;
144 struct resource_list *hlist;
145 resource_size_t add_size;
149 for (list = add_head->next; list;) {
151 /* skip resource that has been reset */
155 /* skip this resource if not found in head list */
156 for (hlist = head->next; hlist && hlist->res != res;
157 hlist = hlist->next);
158 if (!hlist) { /* just skip */
164 idx = res - &list->dev->resource[0];
165 add_size=list->add_size;
166 if (!resource_size(res)) {
167 res->end = res->start + add_size - 1;
168 if(pci_assign_resource(list->dev, idx))
171 resource_size_t align = list->min_align;
172 res->flags |= list->flags & (IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN);
173 if (pci_reassign_resource(list->dev, idx, add_size, align))
174 dev_printk(KERN_DEBUG, &list->dev->dev, "failed to add optional resources res=%pR\n",
179 prev->next = list = list->next;
185 * assign_requested_resources_sorted() - satisfy resource requests
187 * @head : head of the list tracking requests for resources
188 * @failed_list : head of the list tracking requests that could
191 * Satisfy resource requests of each element in the list. Add
192 * requests that could not satisfied to the failed_list.
194 static void assign_requested_resources_sorted(struct resource_list *head,
195 struct resource_list_x *fail_head)
197 struct resource *res;
198 struct resource_list *list;
201 for (list = head->next; list; list = list->next) {
203 idx = res - &list->dev->resource[0];
204 if (resource_size(res) && pci_assign_resource(list->dev, idx)) {
205 if (fail_head && !pci_is_root_bus(list->dev->bus)) {
207 * if the failed res is for ROM BAR, and it will
208 * be enabled later, don't add it to the list
210 if (!((idx == PCI_ROM_RESOURCE) &&
211 (!(res->flags & IORESOURCE_ROM_ENABLE))))
212 add_to_failed_list(fail_head, list->dev, res);
219 static void __assign_resources_sorted(struct resource_list *head,
220 struct resource_list_x *add_head,
221 struct resource_list_x *fail_head)
223 /* Satisfy the must-have resource requests */
224 assign_requested_resources_sorted(head, fail_head);
226 /* Try to satisfy any additional nice-to-have resource
229 adjust_resources_sorted(add_head, head);
230 free_list(resource_list, head);
233 static void pdev_assign_resources_sorted(struct pci_dev *dev,
234 struct resource_list_x *fail_head)
236 struct resource_list head;
239 __dev_sort_resources(dev, &head);
240 __assign_resources_sorted(&head, NULL, fail_head);
244 static void pbus_assign_resources_sorted(const struct pci_bus *bus,
245 struct resource_list_x *add_head,
246 struct resource_list_x *fail_head)
249 struct resource_list head;
252 list_for_each_entry(dev, &bus->devices, bus_list)
253 __dev_sort_resources(dev, &head);
255 __assign_resources_sorted(&head, add_head, fail_head);
258 void pci_setup_cardbus(struct pci_bus *bus)
260 struct pci_dev *bridge = bus->self;
261 struct resource *res;
262 struct pci_bus_region region;
264 dev_info(&bridge->dev, "CardBus bridge to [bus %02x-%02x]\n",
265 bus->secondary, bus->subordinate);
267 res = bus->resource[0];
268 pcibios_resource_to_bus(bridge, ®ion, res);
269 if (res->flags & IORESOURCE_IO) {
271 * The IO resource is allocated a range twice as large as it
272 * would normally need. This allows us to set both IO regs.
274 dev_info(&bridge->dev, " bridge window %pR\n", res);
275 pci_write_config_dword(bridge, PCI_CB_IO_BASE_0,
277 pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_0,
281 res = bus->resource[1];
282 pcibios_resource_to_bus(bridge, ®ion, res);
283 if (res->flags & IORESOURCE_IO) {
284 dev_info(&bridge->dev, " bridge window %pR\n", res);
285 pci_write_config_dword(bridge, PCI_CB_IO_BASE_1,
287 pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_1,
291 res = bus->resource[2];
292 pcibios_resource_to_bus(bridge, ®ion, res);
293 if (res->flags & IORESOURCE_MEM) {
294 dev_info(&bridge->dev, " bridge window %pR\n", res);
295 pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0,
297 pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_0,
301 res = bus->resource[3];
302 pcibios_resource_to_bus(bridge, ®ion, res);
303 if (res->flags & IORESOURCE_MEM) {
304 dev_info(&bridge->dev, " bridge window %pR\n", res);
305 pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1,
307 pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_1,
311 EXPORT_SYMBOL(pci_setup_cardbus);
313 /* Initialize bridges with base/limit values we have collected.
314 PCI-to-PCI Bridge Architecture Specification rev. 1.1 (1998)
315 requires that if there is no I/O ports or memory behind the
316 bridge, corresponding range must be turned off by writing base
317 value greater than limit to the bridge's base/limit registers.
319 Note: care must be taken when updating I/O base/limit registers
320 of bridges which support 32-bit I/O. This update requires two
321 config space writes, so it's quite possible that an I/O window of
322 the bridge will have some undesirable address (e.g. 0) after the
323 first write. Ditto 64-bit prefetchable MMIO. */
324 static void pci_setup_bridge_io(struct pci_bus *bus)
326 struct pci_dev *bridge = bus->self;
327 struct resource *res;
328 struct pci_bus_region region;
331 /* Set up the top and bottom of the PCI I/O segment for this bus. */
332 res = bus->resource[0];
333 pcibios_resource_to_bus(bridge, ®ion, res);
334 if (res->flags & IORESOURCE_IO) {
335 pci_read_config_dword(bridge, PCI_IO_BASE, &l);
337 l |= (region.start >> 8) & 0x00f0;
338 l |= region.end & 0xf000;
339 /* Set up upper 16 bits of I/O base/limit. */
340 io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
341 dev_info(&bridge->dev, " bridge window %pR\n", res);
343 /* Clear upper 16 bits of I/O base/limit. */
347 /* Temporarily disable the I/O range before updating PCI_IO_BASE. */
348 pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff);
349 /* Update lower 16 bits of I/O base/limit. */
350 pci_write_config_dword(bridge, PCI_IO_BASE, l);
351 /* Update upper 16 bits of I/O base/limit. */
352 pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);
355 static void pci_setup_bridge_mmio(struct pci_bus *bus)
357 struct pci_dev *bridge = bus->self;
358 struct resource *res;
359 struct pci_bus_region region;
362 /* Set up the top and bottom of the PCI Memory segment for this bus. */
363 res = bus->resource[1];
364 pcibios_resource_to_bus(bridge, ®ion, res);
365 if (res->flags & IORESOURCE_MEM) {
366 l = (region.start >> 16) & 0xfff0;
367 l |= region.end & 0xfff00000;
368 dev_info(&bridge->dev, " bridge window %pR\n", res);
372 pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
375 static void pci_setup_bridge_mmio_pref(struct pci_bus *bus)
377 struct pci_dev *bridge = bus->self;
378 struct resource *res;
379 struct pci_bus_region region;
382 /* Clear out the upper 32 bits of PREF limit.
383 If PCI_PREF_BASE_UPPER32 was non-zero, this temporarily
384 disables PREF range, which is ok. */
385 pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0);
387 /* Set up PREF base/limit. */
389 res = bus->resource[2];
390 pcibios_resource_to_bus(bridge, ®ion, res);
391 if (res->flags & IORESOURCE_PREFETCH) {
392 l = (region.start >> 16) & 0xfff0;
393 l |= region.end & 0xfff00000;
394 if (res->flags & IORESOURCE_MEM_64) {
395 bu = upper_32_bits(region.start);
396 lu = upper_32_bits(region.end);
398 dev_info(&bridge->dev, " bridge window %pR\n", res);
402 pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);
404 /* Set the upper 32 bits of PREF base & limit. */
405 pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
406 pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
409 static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
411 struct pci_dev *bridge = bus->self;
413 dev_info(&bridge->dev, "PCI bridge to [bus %02x-%02x]\n",
414 bus->secondary, bus->subordinate);
416 if (type & IORESOURCE_IO)
417 pci_setup_bridge_io(bus);
419 if (type & IORESOURCE_MEM)
420 pci_setup_bridge_mmio(bus);
422 if (type & IORESOURCE_PREFETCH)
423 pci_setup_bridge_mmio_pref(bus);
425 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
428 static void pci_setup_bridge(struct pci_bus *bus)
430 unsigned long type = IORESOURCE_IO | IORESOURCE_MEM |
433 __pci_setup_bridge(bus, type);
436 /* Check whether the bridge supports optional I/O and
437 prefetchable memory ranges. If not, the respective
438 base/limit registers must be read-only and read as 0. */
439 static void pci_bridge_check_ranges(struct pci_bus *bus)
443 struct pci_dev *bridge = bus->self;
444 struct resource *b_res;
446 b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
447 b_res[1].flags |= IORESOURCE_MEM;
449 pci_read_config_word(bridge, PCI_IO_BASE, &io);
451 pci_write_config_word(bridge, PCI_IO_BASE, 0xf0f0);
452 pci_read_config_word(bridge, PCI_IO_BASE, &io);
453 pci_write_config_word(bridge, PCI_IO_BASE, 0x0);
456 b_res[0].flags |= IORESOURCE_IO;
457 /* DECchip 21050 pass 2 errata: the bridge may miss an address
458 disconnect boundary by one PCI data phase.
459 Workaround: do not use prefetching on this device. */
460 if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001)
462 pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
464 pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE,
466 pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);
467 pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);
470 b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
471 if ((pmem & PCI_PREF_RANGE_TYPE_MASK) ==
472 PCI_PREF_RANGE_TYPE_64) {
473 b_res[2].flags |= IORESOURCE_MEM_64;
474 b_res[2].flags |= PCI_PREF_RANGE_TYPE_64;
478 /* double check if bridge does support 64 bit pref */
479 if (b_res[2].flags & IORESOURCE_MEM_64) {
480 u32 mem_base_hi, tmp;
481 pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32,
483 pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
485 pci_read_config_dword(bridge, PCI_PREF_BASE_UPPER32, &tmp);
487 b_res[2].flags &= ~IORESOURCE_MEM_64;
488 pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32,
493 /* Helper function for sizing routines: find first available
494 bus resource of a given type. Note: we intentionally skip
495 the bus resources which have already been assigned (that is,
496 have non-NULL parent resource). */
497 static struct resource *find_free_bus_resource(struct pci_bus *bus, unsigned long type)
501 unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
504 pci_bus_for_each_resource(bus, r, i) {
505 if (r == &ioport_resource || r == &iomem_resource)
507 if (r && (r->flags & type_mask) == type && !r->parent)
513 static resource_size_t calculate_iosize(resource_size_t size,
514 resource_size_t min_size,
515 resource_size_t size1,
516 resource_size_t old_size,
517 resource_size_t align)
523 /* To be fixed in 2.5: we should have sort of HAVE_ISA
524 flag in the struct pci_bus. */
525 #if defined(CONFIG_ISA) || defined(CONFIG_EISA)
526 size = (size & 0xff) + ((size & ~0xffUL) << 2);
528 size = ALIGN(size + size1, align);
534 static resource_size_t calculate_memsize(resource_size_t size,
535 resource_size_t min_size,
536 resource_size_t size1,
537 resource_size_t old_size,
538 resource_size_t align)
546 size = ALIGN(size + size1, align);
550 static resource_size_t get_res_add_size(struct resource_list_x *add_head,
551 struct resource *res)
553 struct resource_list_x *list;
555 /* check if it is in add_head list */
556 for (list = add_head->next; list && list->res != res;
559 return list->add_size;
565 * pbus_size_io() - size the io window of a given bus
568 * @min_size : the minimum io window that must to be allocated
569 * @add_size : additional optional io window
570 * @add_head : track the additional io window on this list
572 * Sizing the IO windows of the PCI-PCI bridge is trivial,
573 * since these windows have 4K granularity and the IO ranges
574 * of non-bridge PCI devices are limited to 256 bytes.
575 * We must be careful with the ISA aliasing though.
577 static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
578 resource_size_t add_size, struct resource_list_x *add_head)
581 struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO);
582 unsigned long size = 0, size0 = 0, size1 = 0;
583 resource_size_t children_add_size = 0;
588 list_for_each_entry(dev, &bus->devices, bus_list) {
591 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
592 struct resource *r = &dev->resource[i];
593 unsigned long r_size;
595 if (r->parent || !(r->flags & IORESOURCE_IO))
597 r_size = resource_size(r);
600 /* Might be re-aligned for ISA */
606 children_add_size += get_res_add_size(add_head, r);
609 size0 = calculate_iosize(size, min_size, size1,
610 resource_size(b_res), 4096);
611 if (children_add_size > add_size)
612 add_size = children_add_size;
613 size1 = (!add_head || (add_head && !add_size)) ? size0 :
614 calculate_iosize(size, min_size+add_size, size1,
615 resource_size(b_res), 4096);
616 if (!size0 && !size1) {
617 if (b_res->start || b_res->end)
618 dev_info(&bus->self->dev, "disabling bridge window "
619 "%pR to [bus %02x-%02x] (unused)\n", b_res,
620 bus->secondary, bus->subordinate);
624 /* Alignment of the IO window is always 4K */
626 b_res->end = b_res->start + size0 - 1;
627 b_res->flags |= IORESOURCE_STARTALIGN;
628 if (size1 > size0 && add_head)
629 add_to_list(add_head, bus->self, b_res, size1-size0, 4096);
633 * pbus_size_mem() - size the memory window of a given bus
636 * @min_size : the minimum memory window that must to be allocated
637 * @add_size : additional optional memory window
638 * @add_head : track the additional memory window on this list
640 * Calculate the size of the bus and minimal alignment which
641 * guarantees that all child resources fit in this size.
643 static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
644 unsigned long type, resource_size_t min_size,
645 resource_size_t add_size,
646 struct resource_list_x *add_head)
649 resource_size_t min_align, align, size, size0, size1;
650 resource_size_t aligns[12]; /* Alignments from 1Mb to 2Gb */
651 int order, max_order;
652 struct resource *b_res = find_free_bus_resource(bus, type);
653 unsigned int mem64_mask = 0;
654 resource_size_t children_add_size = 0;
659 memset(aligns, 0, sizeof(aligns));
663 mem64_mask = b_res->flags & IORESOURCE_MEM_64;
664 b_res->flags &= ~IORESOURCE_MEM_64;
666 list_for_each_entry(dev, &bus->devices, bus_list) {
669 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
670 struct resource *r = &dev->resource[i];
671 resource_size_t r_size;
673 if (r->parent || (r->flags & mask) != type)
675 r_size = resource_size(r);
676 /* For bridges size != alignment */
677 align = pci_resource_alignment(dev, r);
678 order = __ffs(align) - 20;
680 dev_warn(&dev->dev, "disabling BAR %d: %pR "
681 "(bad alignment %#llx)\n", i, r,
682 (unsigned long long) align);
689 /* Exclude ranges with size > align from
690 calculation of the alignment. */
692 aligns[order] += align;
693 if (order > max_order)
695 mem64_mask &= r->flags & IORESOURCE_MEM_64;
698 children_add_size += get_res_add_size(add_head, r);
703 for (order = 0; order <= max_order; order++) {
704 resource_size_t align1 = 1;
706 align1 <<= (order + 20);
710 else if (ALIGN(align + min_align, min_align) < align1)
711 min_align = align1 >> 1;
712 align += aligns[order];
714 size0 = calculate_memsize(size, min_size, 0, resource_size(b_res), min_align);
715 if (children_add_size > add_size)
716 add_size = children_add_size;
717 size1 = (!add_head || (add_head && !add_size)) ? size0 :
718 calculate_memsize(size, min_size+add_size, 0,
719 resource_size(b_res), min_align);
720 if (!size0 && !size1) {
721 if (b_res->start || b_res->end)
722 dev_info(&bus->self->dev, "disabling bridge window "
723 "%pR to [bus %02x-%02x] (unused)\n", b_res,
724 bus->secondary, bus->subordinate);
728 b_res->start = min_align;
729 b_res->end = size0 + min_align - 1;
730 b_res->flags |= IORESOURCE_STARTALIGN | mem64_mask;
731 if (size1 > size0 && add_head)
732 add_to_list(add_head, bus->self, b_res, size1-size0, min_align);
736 static void pci_bus_size_cardbus(struct pci_bus *bus)
738 struct pci_dev *bridge = bus->self;
739 struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES];
743 * Reserve some resources for CardBus. We reserve
744 * a fixed amount of bus space for CardBus bridges.
747 b_res[0].end = pci_cardbus_io_size - 1;
748 b_res[0].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;
751 b_res[1].end = pci_cardbus_io_size - 1;
752 b_res[1].flags |= IORESOURCE_IO | IORESOURCE_SIZEALIGN;
755 * Check whether prefetchable memory is supported
758 pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
759 if (!(ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0)) {
760 ctrl |= PCI_CB_BRIDGE_CTL_PREFETCH_MEM0;
761 pci_write_config_word(bridge, PCI_CB_BRIDGE_CONTROL, ctrl);
762 pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
766 * If we have prefetchable memory support, allocate
767 * two regions. Otherwise, allocate one region of
770 if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) {
772 b_res[2].end = pci_cardbus_mem_size - 1;
773 b_res[2].flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_SIZEALIGN;
776 b_res[3].end = pci_cardbus_mem_size - 1;
777 b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
780 b_res[3].end = pci_cardbus_mem_size * 2 - 1;
781 b_res[3].flags |= IORESOURCE_MEM | IORESOURCE_SIZEALIGN;
785 void __ref __pci_bus_size_bridges(struct pci_bus *bus,
786 struct resource_list_x *add_head)
789 unsigned long mask, prefmask;
790 resource_size_t additional_mem_size = 0, additional_io_size = 0;
792 list_for_each_entry(dev, &bus->devices, bus_list) {
793 struct pci_bus *b = dev->subordinate;
797 switch (dev->class >> 8) {
798 case PCI_CLASS_BRIDGE_CARDBUS:
799 pci_bus_size_cardbus(b);
802 case PCI_CLASS_BRIDGE_PCI:
804 __pci_bus_size_bridges(b, add_head);
813 switch (bus->self->class >> 8) {
814 case PCI_CLASS_BRIDGE_CARDBUS:
815 /* don't size cardbuses yet. */
818 case PCI_CLASS_BRIDGE_PCI:
819 pci_bridge_check_ranges(bus);
820 if (bus->self->is_hotplug_bridge) {
821 additional_io_size = pci_hotplug_io_size;
822 additional_mem_size = pci_hotplug_mem_size;
828 pbus_size_io(bus, 0, additional_io_size, add_head);
829 /* If the bridge supports prefetchable range, size it
830 separately. If it doesn't, or its prefetchable window
831 has already been allocated by arch code, try
832 non-prefetchable range for both types of PCI memory
834 mask = IORESOURCE_MEM;
835 prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH;
836 if (pbus_size_mem(bus, prefmask, prefmask, 0, additional_mem_size, add_head))
837 mask = prefmask; /* Success, size non-prefetch only. */
839 additional_mem_size += additional_mem_size;
840 pbus_size_mem(bus, mask, IORESOURCE_MEM, 0, additional_mem_size, add_head);
845 void __ref pci_bus_size_bridges(struct pci_bus *bus)
847 __pci_bus_size_bridges(bus, NULL);
849 EXPORT_SYMBOL(pci_bus_size_bridges);
851 static void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
852 struct resource_list_x *add_head,
853 struct resource_list_x *fail_head)
858 pbus_assign_resources_sorted(bus, add_head, fail_head);
860 list_for_each_entry(dev, &bus->devices, bus_list) {
861 b = dev->subordinate;
865 __pci_bus_assign_resources(b, add_head, fail_head);
867 switch (dev->class >> 8) {
868 case PCI_CLASS_BRIDGE_PCI:
869 if (!pci_is_enabled(dev))
873 case PCI_CLASS_BRIDGE_CARDBUS:
874 pci_setup_cardbus(b);
878 dev_info(&dev->dev, "not setting up bridge for bus "
879 "%04x:%02x\n", pci_domain_nr(b), b->number);
885 void __ref pci_bus_assign_resources(const struct pci_bus *bus)
887 __pci_bus_assign_resources(bus, NULL, NULL);
889 EXPORT_SYMBOL(pci_bus_assign_resources);
891 static void __ref __pci_bridge_assign_resources(const struct pci_dev *bridge,
892 struct resource_list_x *fail_head)
896 pdev_assign_resources_sorted((struct pci_dev *)bridge, fail_head);
898 b = bridge->subordinate;
902 __pci_bus_assign_resources(b, NULL, fail_head);
904 switch (bridge->class >> 8) {
905 case PCI_CLASS_BRIDGE_PCI:
909 case PCI_CLASS_BRIDGE_CARDBUS:
910 pci_setup_cardbus(b);
914 dev_info(&bridge->dev, "not setting up bridge for bus "
915 "%04x:%02x\n", pci_domain_nr(b), b->number);
919 static void pci_bridge_release_resources(struct pci_bus *bus,
923 bool changed = false;
926 unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
930 for (idx = PCI_BRIDGE_RESOURCES; idx <= PCI_BRIDGE_RESOURCE_END;
932 r = &dev->resource[idx];
933 if ((r->flags & type_mask) != type)
938 * if there are children under that, we should release them
941 release_child_resources(r);
942 if (!release_resource(r)) {
943 dev_printk(KERN_DEBUG, &dev->dev,
944 "resource %d %pR released\n", idx, r);
945 /* keep the old size */
946 r->end = resource_size(r) - 1;
954 /* avoiding touch the one without PREF */
955 if (type & IORESOURCE_PREFETCH)
956 type = IORESOURCE_PREFETCH;
957 __pci_setup_bridge(bus, type);
966 * try to release pci bridge resources that is from leaf bridge,
967 * so we can allocate big new one later
969 static void __ref pci_bus_release_bridge_resources(struct pci_bus *bus,
971 enum release_type rel_type)
974 bool is_leaf_bridge = true;
976 list_for_each_entry(dev, &bus->devices, bus_list) {
977 struct pci_bus *b = dev->subordinate;
981 is_leaf_bridge = false;
983 if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
986 if (rel_type == whole_subtree)
987 pci_bus_release_bridge_resources(b, type,
991 if (pci_is_root_bus(bus))
994 if ((bus->self->class >> 8) != PCI_CLASS_BRIDGE_PCI)
997 if ((rel_type == whole_subtree) || is_leaf_bridge)
998 pci_bridge_release_resources(bus, type);
1001 static void pci_bus_dump_res(struct pci_bus *bus)
1003 struct resource *res;
1006 pci_bus_for_each_resource(bus, res, i) {
1007 if (!res || !res->end || !res->flags)
1010 dev_printk(KERN_DEBUG, &bus->dev, "resource %d %pR\n", i, res);
1014 static void pci_bus_dump_resources(struct pci_bus *bus)
1017 struct pci_dev *dev;
1020 pci_bus_dump_res(bus);
1022 list_for_each_entry(dev, &bus->devices, bus_list) {
1023 b = dev->subordinate;
1027 pci_bus_dump_resources(b);
1031 static int __init pci_bus_get_depth(struct pci_bus *bus)
1034 struct pci_dev *dev;
1036 list_for_each_entry(dev, &bus->devices, bus_list) {
1038 struct pci_bus *b = dev->subordinate;
1042 ret = pci_bus_get_depth(b);
1043 if (ret + 1 > depth)
1049 static int __init pci_get_max_depth(void)
1052 struct pci_bus *bus;
1054 list_for_each_entry(bus, &pci_root_buses, node) {
1057 ret = pci_bus_get_depth(bus);
1067 * first try will not touch pci bridge res
1068 * second and later try will clear small leaf bridge res
1069 * will stop till to the max deepth if can not find good one
1072 pci_assign_unassigned_resources(void)
1074 struct pci_bus *bus;
1075 struct resource_list_x add_list; /* list of resources that
1076 want additional resources */
1077 int tried_times = 0;
1078 enum release_type rel_type = leaf_only;
1079 struct resource_list_x head, *list;
1080 unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
1081 IORESOURCE_PREFETCH;
1082 unsigned long failed_type;
1083 int max_depth = pci_get_max_depth();
1088 add_list.next = NULL;
1090 pci_try_num = max_depth + 1;
1091 printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n",
1092 max_depth, pci_try_num);
1095 /* Depth first, calculate sizes and alignments of all
1096 subordinate buses. */
1097 list_for_each_entry(bus, &pci_root_buses, node)
1098 __pci_bus_size_bridges(bus, &add_list);
1100 /* Depth last, allocate resources and update the hardware. */
1101 list_for_each_entry(bus, &pci_root_buses, node)
1102 __pci_bus_assign_resources(bus, &add_list, &head);
1103 BUG_ON(add_list.next);
1106 /* any device complain? */
1108 goto enable_and_dump;
1110 /* don't realloc if asked to do so */
1111 if (!pci_realloc_enabled()) {
1112 free_list(resource_list_x, &head);
1113 goto enable_and_dump;
1117 for (list = head.next; list;) {
1118 failed_type |= list->flags;
1122 * io port are tight, don't try extra
1123 * or if reach the limit, don't want to try more
1125 failed_type &= type_mask;
1126 if ((failed_type == IORESOURCE_IO) || (tried_times >= pci_try_num)) {
1127 free_list(resource_list_x, &head);
1128 goto enable_and_dump;
1131 printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
1134 /* third times and later will not check if it is leaf */
1135 if ((tried_times + 1) > 2)
1136 rel_type = whole_subtree;
1139 * Try to release leaf bridge's resources that doesn't fit resource of
1140 * child device under that bridge
1142 for (list = head.next; list;) {
1143 bus = list->dev->bus;
1144 pci_bus_release_bridge_resources(bus, list->flags & type_mask,
1148 /* restore size and flags */
1149 for (list = head.next; list;) {
1150 struct resource *res = list->res;
1152 res->start = list->start;
1153 res->end = list->end;
1154 res->flags = list->flags;
1155 if (list->dev->subordinate)
1160 free_list(resource_list_x, &head);
1165 /* Depth last, update the hardware. */
1166 list_for_each_entry(bus, &pci_root_buses, node)
1167 pci_enable_bridges(bus);
1169 /* dump the resource on buses */
1170 list_for_each_entry(bus, &pci_root_buses, node)
1171 pci_bus_dump_resources(bus);
1174 void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
1176 struct pci_bus *parent = bridge->subordinate;
1177 int tried_times = 0;
1178 struct resource_list_x head, *list;
1180 unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
1181 IORESOURCE_PREFETCH;
1186 pci_bus_size_bridges(parent);
1187 __pci_bridge_assign_resources(bridge, &head);
1194 if (tried_times >= 2) {
1195 /* still fail, don't need to try more */
1196 free_list(resource_list_x, &head);
1200 printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
1204 * Try to release leaf bridge's resources that doesn't fit resource of
1205 * child device under that bridge
1207 for (list = head.next; list;) {
1208 struct pci_bus *bus = list->dev->bus;
1209 unsigned long flags = list->flags;
1211 pci_bus_release_bridge_resources(bus, flags & type_mask,
1215 /* restore size and flags */
1216 for (list = head.next; list;) {
1217 struct resource *res = list->res;
1219 res->start = list->start;
1220 res->end = list->end;
1221 res->flags = list->flags;
1222 if (list->dev->subordinate)
1227 free_list(resource_list_x, &head);
1232 retval = pci_reenable_device(bridge);
1233 pci_set_master(bridge);
1234 pci_enable_bridges(parent);
1236 EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);