/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 *
 * The System z PCI code is a rewrite from a prototype by
 * the following people (Kudoz!):
 */

#define COMPONENT "zPCI"
#define pr_fmt(fmt) COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/pci.h>
#include <linux/msi.h>

#include <asm/isc.h>
#include <asm/airq.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>

#define DEBUG				/* enable pr_debug */

#define	SIC_IRQ_MODE_ALL		0
#define	SIC_IRQ_MODE_SINGLE		1

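/*
 * Note (added for clarity, not in the original source): SIC_IRQ_MODE_SINGLE
 * is passed to sic_instr() together with PCI_ISC below to (re)enable adapter
 * interrupts for the PCI interruption subclass only, while SIC_IRQ_MODE_ALL
 * would presumably cover all subclasses.
 */
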
#define	ZPCI_NR_DMA_SPACES		1
#define	ZPCI_MSI_VEC_BITS		6
#define ZPCI_NR_DEVICES			CONFIG_PCI_NR_FUNCTIONS

/* list of all detected zpci devices */
LIST_HEAD(zpci_list);
EXPORT_SYMBOL_GPL(zpci_list);
DEFINE_MUTEX(zpci_list_lock);
EXPORT_SYMBOL_GPL(zpci_list_lock);

struct pci_hp_callback_ops hotplug_ops;
EXPORT_SYMBOL_GPL(hotplug_ops);

static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
static DEFINE_SPINLOCK(zpci_domain_lock);

struct callback {
	irq_handler_t	handler;
	void		*data;
};

struct zdev_irq_map {
	unsigned long	aibv;		/* AI bit vector */
	int		msi_vecs;	/* consecutive MSI-vectors used */
	struct callback	cb[ZPCI_NR_MSI_VECS];	/* callback handler array */
	spinlock_t	lock;		/* protect callbacks against de-reg */
};

struct intr_bucket {
	/* amap of adapters, one bit per dev, corresponds to one irq nr */
	unsigned long	*alloc;
	/* AI summary bit, global page for all devices */
	unsigned long	*aisb;
	/* pointer to aibv and callback data in zdev */
	struct zdev_irq_map *imap[ZPCI_NR_DEVICES];
	/* protects the whole bucket struct */
	spinlock_t	lock;
};

static struct intr_bucket *bucket;

/* Adapter local summary indicator */
static u8 *zpci_irq_si;

static atomic_t irq_retries = ATOMIC_INIT(0);

static DEFINE_SPINLOCK(zpci_iomap_lock);
static DECLARE_BITMAP(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);

/* highest irq summary bit */
static int __read_mostly aisb_max;

static struct kmem_cache *zdev_irq_cache;

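/*
 * Illustrative note (added, not from the original source): a zPCI MSI
 * interrupt number encodes the adapter summary bit (device index) in the
 * upper bits and the MSI vector in the lower ZPCI_MSI_VEC_BITS bits. For
 * example, with ZPCI_MSI_VEC_BITS == 6, irq 0x83 = (2 << 6) | 3 belongs to
 * device 2, vector 3. The helpers below split an irq back into those parts.
 */
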
static inline int irq_to_msi_nr(unsigned int irq)
{
	return irq & ZPCI_MSI_MASK;
}

static inline int irq_to_dev_nr(unsigned int irq)
{
	return irq >> ZPCI_MSI_VEC_BITS;
}

static inline struct zdev_irq_map *get_imap(unsigned int irq)
{
	return bucket->imap[irq_to_dev_nr(irq)];
}

struct zpci_dev *get_zdev(struct pci_dev *pdev)
{
	return (struct zpci_dev *) pdev->sysdata;
}

struct zpci_dev *get_zdev_by_fid(u32 fid)
{
	struct zpci_dev *tmp, *zdev = NULL;

	mutex_lock(&zpci_list_lock);
	list_for_each_entry(tmp, &zpci_list, entry) {
		if (tmp->fid == fid) {
			zdev = tmp;
			break;
		}
	}
	mutex_unlock(&zpci_list_lock);
	return zdev;
}

bool zpci_fid_present(u32 fid)
{
	return (get_zdev_by_fid(fid) != NULL) ? true : false;
}

static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus)
{
	return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL;
}

int pci_domain_nr(struct pci_bus *bus)
{
	return ((struct zpci_dev *) bus->sysdata)->domain;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);

int pci_proc_domain(struct pci_bus *bus)
{
	return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);

/* Store PCI function information block */
static int zpci_store_fib(struct zpci_dev *zdev, u8 *fc)
{
	struct zpci_fib *fib;
	u8 status, cc;

	fib = (void *) get_zeroed_page(GFP_KERNEL);
	if (!fib)
		return -ENOMEM;

	do {
		cc = __stpcifc(zdev->fh, 0, fib, &status);
		if (cc == 2) {
			msleep(ZPCI_INSN_BUSY_DELAY);
			memset(fib, 0, PAGE_SIZE);
		}
	} while (cc == 2);

	if (cc)
		pr_err_once("%s: cc: %u status: %u\n",
			    __func__, cc, status);

	/* Return PCI function controls */
	*fc = fib->fc;

	free_page((unsigned long) fib);
	return (cc) ? -EIO : 0;
}

/* Modify PCI: Register adapter interruptions */
static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb,
			      u64 aibv)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
	struct zpci_fib *fib;
	int rc;

	fib = (void *) get_zeroed_page(GFP_KERNEL);
	if (!fib)
		return -ENOMEM;

	fib->isc = PCI_ISC;
	fib->noi = zdev->irq_map->msi_vecs;
	fib->sum = 1;		/* enable summary notifications */
	fib->aibv = aibv;
	fib->aibvo = 0;		/* every function has its own page */
	fib->aisb = (u64) bucket->aisb + aisb / 8;
	fib->aisbo = aisb & ZPCI_MSI_MASK;

	rc = mpcifc_instr(req, fib);
	pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi);

	free_page((unsigned long) fib);
	return rc;
}

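/*
 * Worked example (added, not part of the original source): for summary bit
 * aisb = 70 the FIB's summary-bit address above resolves to byte
 * bucket->aisb + 70 / 8 = bucket->aisb + 8, i.e. each function owns one bit
 * in the shared summary page that zpci_irq_handler() scans, while the
 * per-function AI bit vector (aibv) lives in that function's zdev_irq_map.
 */
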
struct mod_pci_args {
	u64 base;
	u64 limit;
	u64 iota;
};

static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args *args)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, fn);
	struct zpci_fib *fib;
	int rc;

	/* The FIB must be available even if it's not used */
	fib = (void *) get_zeroed_page(GFP_KERNEL);
	if (!fib)
		return -ENOMEM;

	fib->pba = args->base;
	fib->pal = args->limit;
	fib->iota = args->iota;

	rc = mpcifc_instr(req, fib);
	free_page((unsigned long) fib);
	return rc;
}

/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
		       u64 base, u64 limit, u64 iota)
{
	struct mod_pci_args args = { base, limit, iota };

	WARN_ON_ONCE(iota & 0x3fff);
	args.iota |= ZPCI_IOTA_RTTO_FLAG;
	return mod_pci(zdev, ZPCI_MOD_FC_REG_IOAT, dmaas, &args);
}

/* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
	struct mod_pci_args args = { 0, 0, 0 };

	return mod_pci(zdev, ZPCI_MOD_FC_DEREG_IOAT, dmaas, &args);
}

/* Modify PCI: Unregister adapter interruptions */
static int zpci_unregister_airq(struct zpci_dev *zdev)
{
	struct mod_pci_args args = { 0, 0, 0 };

	return mod_pci(zdev, ZPCI_MOD_FC_DEREG_INT, 0, &args);
}

#define ZPCI_PCIAS_CFGSPC	15

static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data;
	int rc;

	rc = pcilg_instr(&data, req, offset);
	data = data << ((8 - len) * 8);
	data = le64_to_cpu(data);
	if (!rc)
		*val = (u32) data;
	else
		*val = 0xffffffff;
	return rc;
}

static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data = val;
	int rc;

	data = cpu_to_le64(data);
	data = data >> ((8 - len) * 8);
	rc = pcistg_instr(data, req, offset);
	return rc;
}

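/*
 * Note (added for clarity, not in the original source): zpci_cfg_load() and
 * zpci_cfg_store() move a 1/2/4-byte config space value through the 64-bit
 * register of the PCI load/store instructions; the shift by (8 - len) * 8
 * aligns the value within that register and the le64/cpu conversions account
 * for config space being little-endian while s390 is big-endian.
 */
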
void synchronize_irq(unsigned int irq)
{
	/*
	 * Not needed, the handler is protected by a lock and IRQs that occur
	 * after the handler is deleted are just NOPs.
	 */
}
EXPORT_SYMBOL_GPL(synchronize_irq);

void enable_irq(unsigned int irq)
{
	struct msi_desc *msi = irq_get_msi_desc(irq);

	zpci_msi_set_mask_bits(msi, 1, 0);
}
EXPORT_SYMBOL_GPL(enable_irq);

void disable_irq(unsigned int irq)
{
	struct msi_desc *msi = irq_get_msi_desc(irq);

	zpci_msi_set_mask_bits(msi, 1, 1);
}
EXPORT_SYMBOL_GPL(disable_irq);

void disable_irq_nosync(unsigned int irq)
{
	disable_irq(irq);
}
EXPORT_SYMBOL_GPL(disable_irq_nosync);

unsigned long probe_irq_on(void)
{
	return 0;
}
EXPORT_SYMBOL_GPL(probe_irq_on);

int probe_irq_off(unsigned long val)
{
	return 0;
}
EXPORT_SYMBOL_GPL(probe_irq_off);

unsigned int probe_irq_mask(unsigned long val)
{
	return val;
}
EXPORT_SYMBOL_GPL(probe_irq_mask);

void __devinit pcibios_fixup_bus(struct pci_bus *bus)
{
}

resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	return 0;
}

/* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
	zpci_memcpy_toio(to, from, count);
}

/* Create a virtual mapping cookie for a PCI BAR */
void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max)
{
	struct zpci_dev *zdev = get_zdev(pdev);
	u64 addr;
	int idx;

	if ((bar & 7) != bar)
		return NULL;

	idx = zdev->bars[bar].map_idx;
	spin_lock(&zpci_iomap_lock);
	zpci_iomap_start[idx].fh = zdev->fh;
	zpci_iomap_start[idx].bar = bar;
	spin_unlock(&zpci_iomap_lock);

	addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
	return (void __iomem *) addr;
}
EXPORT_SYMBOL_GPL(pci_iomap);

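/*
 * Example (added, not in the original source): for map_idx 5 pci_iomap()
 * returns the cookie ZPCI_IOMAP_ADDR_BASE | (5UL << 48); pci_iounmap() below
 * recovers the index by masking off ZPCI_IOMAP_ADDR_BASE and shifting right
 * by 48. No page tables are involved; the "mapping" is purely a lookup into
 * zpci_iomap_start[].
 */
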
void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	unsigned int idx;

	idx = (((__force u64) addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48;
	spin_lock(&zpci_iomap_lock);
	zpci_iomap_start[idx].fh = 0;
	zpci_iomap_start[idx].bar = 0;
	spin_unlock(&zpci_iomap_lock);
}
EXPORT_SYMBOL_GPL(pci_iounmap);

static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);

	if (!zdev || devfn != ZPCI_DEVFN)
		return 0;

	return zpci_cfg_load(zdev, where, val, size);
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);

	if (!zdev || devfn != ZPCI_DEVFN)
		return 0;

	return zpci_cfg_store(zdev, where, val, size);
}

static struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};

/* store the last handled bit to implement fair scheduling of devices */
static DEFINE_PER_CPU(unsigned long, next_sbit);

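/*
 * Note (added for clarity, not in the original source): the handler below
 * starts scanning the summary bits at the per-cpu next_sbit position, wraps
 * around to pick up the bits it skipped, re-enables the ISC and then rescans
 * once more so that a summary bit set in the meantime is not lost. The
 * position after the last serviced device is stored as the next starting
 * point, which gives each device a fair chance to be handled first.
 */
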
static void zpci_irq_handler(void *dont, void *need)
{
	unsigned long sbit, mbit, last = 0, start = __get_cpu_var(next_sbit);
	int rescan = 0, max = aisb_max;
	struct zdev_irq_map *imap;

	kstat_cpu(smp_processor_id()).irqs[IOINT_PCI]++;
	sbit = start;

scan:
	/* find summary_bit */
	for_each_set_bit_left_cont(sbit, bucket->aisb, max) {
		clear_bit(63 - (sbit & 63), bucket->aisb + (sbit >> 6));
		last = sbit;

		/* find vector bit */
		imap = bucket->imap[sbit];
		for_each_set_bit_left(mbit, &imap->aibv, imap->msi_vecs) {
			kstat_cpu(smp_processor_id()).irqs[IOINT_MSI]++;
			clear_bit(63 - mbit, &imap->aibv);

			spin_lock(&imap->lock);
			if (imap->cb[mbit].handler)
				imap->cb[mbit].handler(mbit,
					imap->cb[mbit].data);
			spin_unlock(&imap->lock);
		}
	}

	if (rescan)
		goto out;

	/* scan the skipped bits */
	if (start > 0) {
		sbit = 0;
		max = start;
		start = 0;
		goto scan;
	}

	/* enable interrupts again */
	sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);

	/* check again to not lose initiative */
	rmb();
	max = aisb_max;
	sbit = find_first_bit_left(bucket->aisb, max);
	if (sbit != -1UL) {
		atomic_inc(&irq_retries);
		rescan++;
		goto scan;
	}
out:
	/* store next device bit to scan */
	__get_cpu_var(next_sbit) = (++last >= aisb_max) ? 0 : last;
}

/* msi_vecs - number of requested interrupts, 0 place function to error state */
static int zpci_setup_msi(struct pci_dev *pdev, int msi_vecs)
{
	struct zpci_dev *zdev = get_zdev(pdev);
	unsigned int aisb, msi_nr;
	struct msi_desc *msi;
	int rc;

	/* store the number of used MSI vectors */
	zdev->irq_map->msi_vecs = min(msi_vecs, ZPCI_NR_MSI_VECS);

	spin_lock(&bucket->lock);
	aisb = find_first_zero_bit(bucket->alloc, PAGE_SIZE);
	/* alloc map exhausted? */
	if (aisb == PAGE_SIZE) {
		spin_unlock(&bucket->lock);
		return -EIO;
	}
	set_bit(aisb, bucket->alloc);
	spin_unlock(&bucket->lock);

	zdev->aisb = aisb;
	if (aisb + 1 > aisb_max)
		aisb_max = aisb + 1;

	/* wire up IRQ shortcut pointer */
	bucket->imap[zdev->aisb] = zdev->irq_map;
	pr_debug("%s: imap[%u] linked to %p\n", __func__, zdev->aisb, zdev->irq_map);

	/* TODO: irq number 0 wont be found if we return less than requested MSIs.
	 * ignore it for now and fix in common code.
	 */
	msi_nr = aisb << ZPCI_MSI_VEC_BITS;

	list_for_each_entry(msi, &pdev->msi_list, list) {
		rc = zpci_setup_msi_irq(zdev, msi, msi_nr,
					aisb << ZPCI_MSI_VEC_BITS);
		if (rc)
			return rc;
		msi_nr++;
	}

	rc = zpci_register_airq(zdev, aisb, (u64) &zdev->irq_map->aibv);
	if (rc) {
		clear_bit(aisb, bucket->alloc);
		dev_err(&pdev->dev, "register MSI failed with: %d\n", rc);
		return rc;
	}
	return (zdev->irq_map->msi_vecs == msi_vecs) ?
		0 : zdev->irq_map->msi_vecs;
}

static void zpci_teardown_msi(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = get_zdev(pdev);
	struct msi_desc *msi;
	int aisb, rc;

	rc = zpci_unregister_airq(zdev);
	if (rc) {
		dev_err(&pdev->dev, "deregister MSI failed with: %d\n", rc);
		return;
	}

	msi = list_first_entry(&pdev->msi_list, struct msi_desc, list);
	aisb = irq_to_dev_nr(msi->irq);

	list_for_each_entry(msi, &pdev->msi_list, list)
		zpci_teardown_msi_irq(zdev, msi);

	clear_bit(aisb, bucket->alloc);
	if (aisb + 1 == aisb_max)
		aisb_max--;
}

int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	pr_debug("%s: requesting %d MSI-X interrupts...", __func__, nvec);
	if (type != PCI_CAP_ID_MSIX && type != PCI_CAP_ID_MSI)
		return -EINVAL;
	return zpci_setup_msi(pdev, nvec);
}

void arch_teardown_msi_irqs(struct pci_dev *pdev)
{
	pr_info("%s: on pdev: %p\n", __func__, pdev);
	zpci_teardown_msi(pdev);
}

static void zpci_map_resources(struct zpci_dev *zdev)
{
	struct pci_dev *pdev = zdev->pdev;
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pdev->resource[i].start = (resource_size_t) pci_iomap(pdev, i, 0);
		pdev->resource[i].end = pdev->resource[i].start + len - 1;
		pr_debug("BAR%i: -> start: %Lx  end: %Lx\n",
			 i, pdev->resource[i].start, pdev->resource[i].end);
	}
}

static void zpci_unmap_resources(struct pci_dev *pdev)
{
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pci_iounmap(pdev, (void *) pdev->resource[i].start);
	}
}

struct zpci_dev *zpci_alloc_device(void)
{
	struct zpci_dev *zdev;

	/* Alloc memory for our private pci device data */
	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
	if (!zdev)
		return ERR_PTR(-ENOMEM);

	/* Alloc aibv & callback space */
	zdev->irq_map = kmem_cache_zalloc(zdev_irq_cache, GFP_KERNEL);
	if (!zdev->irq_map)
		goto error;
	WARN_ON((u64) zdev->irq_map & 0xff);
	return zdev;

error:
	kfree(zdev);
	return ERR_PTR(-ENOMEM);
}

void zpci_free_device(struct zpci_dev *zdev)
{
	kmem_cache_free(zdev_irq_cache, zdev->irq_map);
	kfree(zdev);
}

/* Called on removal of pci_dev, leaves zpci and bus device */
static void zpci_remove_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = get_zdev(pdev);

	dev_info(&pdev->dev, "Removing device %u\n", zdev->domain);
	zdev->state = ZPCI_FN_STATE_CONFIGURED;
	zpci_dma_exit_device(zdev);
	zpci_sysfs_remove_device(&pdev->dev);
	zpci_unmap_resources(pdev);
	list_del(&zdev->entry);		/* can be called from init */
	zdev->pdev = NULL;
}

static void zpci_scan_devices(void)
{
	struct zpci_dev *zdev;

	mutex_lock(&zpci_list_lock);
	list_for_each_entry(zdev, &zpci_list, entry)
		if (zdev->state == ZPCI_FN_STATE_CONFIGURED)
			zpci_scan_device(zdev);
	mutex_unlock(&zpci_list_lock);
}

/*
 * Too late for any s390 specific setup, since interrupts must be set up
 * already which requires DMA setup too and the pci scan will access the
 * config space, which only works if the function handle is enabled.
 */
int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	struct resource *res;
	u16 cmd;
	int i;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		res = &pdev->resource[i];

		if (res->flags & IORESOURCE_IO)
			return -EINVAL;

		if (res->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}
	pci_write_config_word(pdev, PCI_COMMAND, cmd);
	return 0;
}

void pcibios_disable_device(struct pci_dev *pdev)
{
	zpci_remove_device(pdev);
	pdev->sysdata = NULL;
}

int pcibios_add_platform_entries(struct pci_dev *pdev)
{
	return zpci_sysfs_add_device(&pdev->dev);
}

int zpci_request_irq(unsigned int irq, irq_handler_t handler, void *data)
{
	int msi_nr = irq_to_msi_nr(irq);
	struct zdev_irq_map *imap;
	struct msi_desc *msi;

	msi = irq_get_msi_desc(irq);
	if (!msi)
		return -EIO;

	imap = get_imap(irq);
	spin_lock_init(&imap->lock);

	pr_debug("%s: register handler for IRQ:MSI %d:%d\n", __func__, irq >> 6, msi_nr);
	imap->cb[msi_nr].handler = handler;
	imap->cb[msi_nr].data = data;

	/*
	 * The generic MSI code returns with the interrupt disabled on the
	 * card, using the MSI mask bits. Firmware doesn't appear to unmask
	 * at that level, so we do it here by hand.
	 */
	zpci_msi_set_mask_bits(msi, 1, 0);
	return 0;
}

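/*
 * Usage sketch (added, hypothetical driver code, not from the original
 * source): a PCI driver on s390 simply calls the usual API, e.g.
 *
 *	err = request_irq(pdev->irq, my_handler, 0, "my_drv", my_dev);
 *
 * which the request_irq() wrapper below routes to zpci_request_irq(), so the
 * handler ends up in the imap->cb[] slot that zpci_irq_handler() invokes.
 */
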
void zpci_free_irq(unsigned int irq)
{
	struct zdev_irq_map *imap = get_imap(irq);
	int msi_nr = irq_to_msi_nr(irq);
	unsigned long flags;

	pr_debug("%s: for irq: %d\n", __func__, irq);

	spin_lock_irqsave(&imap->lock, flags);
	imap->cb[msi_nr].handler = NULL;
	imap->cb[msi_nr].data = NULL;
	spin_unlock_irqrestore(&imap->lock, flags);
}

int request_irq(unsigned int irq, irq_handler_t handler,
		unsigned long irqflags, const char *devname, void *dev_id)
{
	pr_debug("%s: irq: %d  handler: %p  flags: %lx  dev: %s\n",
		__func__, irq, handler, irqflags, devname);

	return zpci_request_irq(irq, handler, dev_id);
}
EXPORT_SYMBOL_GPL(request_irq);

void free_irq(unsigned int irq, void *dev_id)
{
	zpci_free_irq(irq);
}
EXPORT_SYMBOL_GPL(free_irq);

static int __init zpci_irq_init(void)
{
	int cpu, rc;

	bucket = kzalloc(sizeof(*bucket), GFP_KERNEL);
	if (!bucket)
		return -ENOMEM;

	bucket->aisb = (unsigned long *) get_zeroed_page(GFP_KERNEL);
	if (!bucket->aisb) {
		rc = -ENOMEM;
		goto out_aisb;
	}

	bucket->alloc = (unsigned long *) get_zeroed_page(GFP_KERNEL);
	if (!bucket->alloc) {
		rc = -ENOMEM;
		goto out_alloc;
	}

	isc_register(PCI_ISC);
	zpci_irq_si = s390_register_adapter_interrupt(&zpci_irq_handler, NULL, PCI_ISC);
	if (IS_ERR(zpci_irq_si)) {
		rc = PTR_ERR(zpci_irq_si);
		goto out_ai;
	}

	for_each_online_cpu(cpu)
		per_cpu(next_sbit, cpu) = 0;

	spin_lock_init(&bucket->lock);
	/* set summary to 1 to be called every time for the ISC */
	*zpci_irq_si = 1;
	sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
	return 0;

out_ai:
	isc_unregister(PCI_ISC);
	free_page((unsigned long) bucket->alloc);
out_alloc:
	free_page((unsigned long) bucket->aisb);
out_aisb:
	kfree(bucket);
	return rc;
}

static void zpci_irq_exit(void)
{
	free_page((unsigned long) bucket->alloc);
	free_page((unsigned long) bucket->aisb);
	s390_unregister_adapter_interrupt(zpci_irq_si, PCI_ISC);
	isc_unregister(PCI_ISC);
	kfree(bucket);
}

static struct resource *zpci_alloc_bus_resource(unsigned long start, unsigned long size,
						unsigned long flags, int domain)
{
	struct resource *r;
	char *name;
	int rc;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return ERR_PTR(-ENOMEM);
	r->start = start;
	r->end = r->start + size - 1;
	r->flags = flags;
	r->parent = &iomem_resource;
	name = kmalloc(18, GFP_KERNEL);
	if (!name) {
		kfree(r);
		return ERR_PTR(-ENOMEM);
	}
	sprintf(name, "PCI Bus: %04x:%02x", domain, ZPCI_BUS_NR);
	r->name = name;

	rc = request_resource(&iomem_resource, r);
	if (rc)
		pr_debug("request resource %pR failed\n", r);
	return r;
}

static int zpci_alloc_iomap(struct zpci_dev *zdev)
{
	int entry;

	spin_lock(&zpci_iomap_lock);
	entry = find_first_zero_bit(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
	if (entry == ZPCI_IOMAP_MAX_ENTRIES) {
		spin_unlock(&zpci_iomap_lock);
		return -ENOSPC;
	}
	set_bit(entry, zpci_iomap);
	spin_unlock(&zpci_iomap_lock);
	return entry;
}

static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
{
	spin_lock(&zpci_iomap_lock);
	memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
	clear_bit(entry, zpci_iomap);
	spin_unlock(&zpci_iomap_lock);
}

static int zpci_create_device_bus(struct zpci_dev *zdev)
{
	struct resource *res;
	LIST_HEAD(resources);
	int i;

	/* allocate mapping entry for each used bar */
	for (i = 0; i < PCI_BAR_COUNT; i++) {
		unsigned long addr, size, flags;
		int entry;

		if (!zdev->bars[i].size)
			continue;
		entry = zpci_alloc_iomap(zdev);
		if (entry < 0)
			return entry;
		zdev->bars[i].map_idx = entry;

		/* only MMIO is supported */
		flags = IORESOURCE_MEM;
		if (zdev->bars[i].val & 8)
			flags |= IORESOURCE_PREFETCH;
		if (zdev->bars[i].val & 4)
			flags |= IORESOURCE_MEM_64;

		addr = ZPCI_IOMAP_ADDR_BASE + ((u64) entry << 48);
		size = 1UL << zdev->bars[i].size;

		res = zpci_alloc_bus_resource(addr, size, flags, zdev->domain);
		if (IS_ERR(res)) {
			zpci_free_iomap(zdev, entry);
			return PTR_ERR(res);
		}
		pci_add_resource(&resources, res);
	}

	zdev->bus = pci_create_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
					zdev, &resources);
	if (!zdev->bus)
		return -EIO;

	zdev->bus->max_bus_speed = zdev->max_bus_speed;
	return 0;
}

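/*
 * Note (added for clarity, not in the original source): the bars[i].val bits
 * tested above match the standard BAR attribute bits, 0x8 being the
 * prefetchable bit and 0x4 the 64-bit memory type, and bars[i].size holds
 * log2 of the BAR length, hence size = 1UL << zdev->bars[i].size.
 */
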
static int zpci_alloc_domain(struct zpci_dev *zdev)
{
	spin_lock(&zpci_domain_lock);
	zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
	if (zdev->domain == ZPCI_NR_DEVICES) {
		spin_unlock(&zpci_domain_lock);
		return -ENOSPC;
	}
	set_bit(zdev->domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return 0;
}

static void zpci_free_domain(struct zpci_dev *zdev)
{
	spin_lock(&zpci_domain_lock);
	clear_bit(zdev->domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
}

int zpci_enable_device(struct zpci_dev *zdev)
{
	int rc;

	rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
	if (rc)
		goto out;
	pr_info("Enabled fh: 0x%x fid: 0x%x\n", zdev->fh, zdev->fid);

	rc = zpci_dma_init_device(zdev);
	if (rc)
		goto out_dma;
	return 0;

out_dma:
	clp_disable_fh(zdev);
out:
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_enable_device);

int zpci_create_device(struct zpci_dev *zdev)
{
	int rc;

	rc = zpci_alloc_domain(zdev);
	if (rc)
		goto out;

	rc = zpci_create_device_bus(zdev);
	if (rc)
		goto out_bus;

	mutex_lock(&zpci_list_lock);
	list_add_tail(&zdev->entry, &zpci_list);
	if (hotplug_ops.create_slot)
		hotplug_ops.create_slot(zdev);
	mutex_unlock(&zpci_list_lock);

	if (zdev->state == ZPCI_FN_STATE_STANDBY)
		return 0;

	rc = zpci_enable_device(zdev);
	if (rc)
		goto out_start;
	return 0;

out_start:
	mutex_lock(&zpci_list_lock);
	list_del(&zdev->entry);
	if (hotplug_ops.remove_slot)
		hotplug_ops.remove_slot(zdev);
	mutex_unlock(&zpci_list_lock);
out_bus:
	zpci_free_domain(zdev);
out:
	return rc;
}

void zpci_stop_device(struct zpci_dev *zdev)
{
	zpci_dma_exit_device(zdev);
	/*
	 * Note: SCLP disables fh via set-pci-fn so don't
	 * do that here.
	 */
}
EXPORT_SYMBOL_GPL(zpci_stop_device);

int zpci_scan_device(struct zpci_dev *zdev)
{
	zdev->pdev = pci_scan_single_device(zdev->bus, ZPCI_DEVFN);
	if (!zdev->pdev) {
		pr_err("pci_scan_single_device failed for fid: 0x%x\n",
			zdev->fid);
		goto out;
	}

	zpci_map_resources(zdev);
	pci_bus_add_devices(zdev->bus);

	/* now that pdev was added to the bus mark it as used */
	zdev->state = ZPCI_FN_STATE_ONLINE;
	return 0;

out:
	zpci_dma_exit_device(zdev);
	clp_disable_fh(zdev);
	return -EIO;
}
EXPORT_SYMBOL_GPL(zpci_scan_device);

static inline int barsize(u8 size)
{
	return (size) ? (1 << size) >> 10 : 0;
}

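/*
 * Worked example (added, not in the original source): bars[].size holds log2
 * of the BAR length (see zpci_create_device_bus() above), so barsize(20) =
 * (1 << 20) >> 10 = 1024, i.e. a 1 MiB BAR expressed in KiB, while
 * barsize(0) yields 0 for an unused BAR.
 */
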
static int zpci_mem_init(void)
{
	zdev_irq_cache = kmem_cache_create("PCI_IRQ_cache", sizeof(struct zdev_irq_map),
				L1_CACHE_BYTES, SLAB_HWCACHE_ALIGN, NULL);
	if (!zdev_irq_cache)
		goto error_zdev;

	/* TODO: use realloc */
	zpci_iomap_start = kzalloc(ZPCI_IOMAP_MAX_ENTRIES * sizeof(*zpci_iomap_start),
				   GFP_KERNEL);
	if (!zpci_iomap_start)
		goto error_iomap;
	return 0;

error_iomap:
	kmem_cache_destroy(zdev_irq_cache);
error_zdev:
	return -ENOMEM;
}

static void zpci_mem_exit(void)
{
	kfree(zpci_iomap_start);
	kmem_cache_destroy(zdev_irq_cache);
}

unsigned int pci_probe = 1;
EXPORT_SYMBOL_GPL(pci_probe);

char * __init pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		pci_probe = 0;
		return NULL;
	}
	return str;
}

static int __init pci_base_init(void)
{
	int rc;

	if (!test_facility(2) || !test_facility(69)
	    || !test_facility(71) || !test_facility(72))
		return 0;

	pr_info("Probing PCI hardware: PCI:%d  SID:%d  AEN:%d\n",
		test_facility(69), test_facility(70),
		test_facility(71));

	rc = zpci_mem_init();
	if (rc)
		goto out_mem;

	rc = zpci_msihash_init();
	if (rc)
		goto out_hash;

	rc = zpci_irq_init();
	if (rc)
		goto out_irq;

	rc = zpci_dma_init();
	if (rc)
		goto out_dma;

	rc = clp_find_pci_devices();
	if (rc)
		goto out_find;

	zpci_scan_devices();
	return 0;

out_find:
	zpci_dma_exit();
out_dma:
	zpci_irq_exit();
out_irq:
	zpci_msihash_exit();
out_hash:
	zpci_mem_exit();
out_mem:
	return rc;
}
subsys_initcall(pci_base_init);