/*
 * Copyright IBM Corp. 2012
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 *
 * The System z PCI code is a rewrite from a prototype by
 * the following people (Kudoz!):
 */
#define COMPONENT "zPCI"
#define pr_fmt(fmt) COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/pci.h>
#include <linux/msi.h>

#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_clp.h>
#include <asm/pci_dma.h>

#define DEBUG				/* enable pr_debug */

#define SIC_IRQ_MODE_ALL		0
#define SIC_IRQ_MODE_SINGLE		1

#define ZPCI_NR_DMA_SPACES		1
#define ZPCI_MSI_VEC_BITS		6
#define ZPCI_NR_DEVICES			CONFIG_PCI_NR_FUNCTIONS
/* list of all detected zpci devices */
LIST_HEAD(zpci_list);
EXPORT_SYMBOL_GPL(zpci_list);
DEFINE_MUTEX(zpci_list_lock);
EXPORT_SYMBOL_GPL(zpci_list_lock);

struct pci_hp_callback_ops hotplug_ops;
EXPORT_SYMBOL_GPL(hotplug_ops);

static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
static DEFINE_SPINLOCK(zpci_domain_lock);
struct callback {
	irq_handler_t	handler;
	void		*data;
};

struct zdev_irq_map {
	unsigned long	aibv;		/* AI bit vector */
	int		msi_vecs;	/* consecutive MSI-vectors used */
	struct callback	cb[ZPCI_NR_MSI_VECS]; /* callback handler array */
	spinlock_t	lock;		/* protect callbacks against de-reg */
};

struct intr_bucket {
	/* map of adapters, one bit per device; corresponds to one irq nr */
	unsigned long		*alloc;
	/* AI summary bit, global page for all devices */
	unsigned long		*aisb;
	/* pointer to aibv and callback data in zdev */
	struct zdev_irq_map	*imap[ZPCI_NR_DEVICES];
	/* protects the whole bucket struct */
	spinlock_t		lock;
};
static struct intr_bucket *bucket;

/* Adapter local summary indicator */
static u8 *zpci_irq_si;

static atomic_t irq_retries = ATOMIC_INIT(0);

static DEFINE_SPINLOCK(zpci_iomap_lock);
static DECLARE_BITMAP(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);

/* highest irq summary bit */
static int __read_mostly aisb_max;

static struct kmem_cache *zdev_irq_cache;
static struct kmem_cache *zdev_fmb_cache;

debug_info_t *pci_debug_msg_id;
debug_info_t *pci_debug_err_id;
static inline int irq_to_msi_nr(unsigned int irq)
{
	return irq & ZPCI_MSI_MASK;
}

static inline int irq_to_dev_nr(unsigned int irq)
{
	return irq >> ZPCI_MSI_VEC_BITS;
}

static inline struct zdev_irq_map *get_imap(unsigned int irq)
{
	return bucket->imap[irq_to_dev_nr(irq)];
}
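/*
 * Illustration (not part of the original code): a virtual irq number is
 * split into <adapter/summary bit> : <MSI vector>.  With ZPCI_MSI_VEC_BITS
 * set to 6 and assuming ZPCI_MSI_MASK covers the low six bits (0x3f),
 * irq 0x8d (binary 10 001101) belongs to adapter slot 2 and MSI vector 13,
 * i.e. its handler is looked up via bucket->imap[2]->cb[13].
 */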
struct zpci_dev *get_zdev(struct pci_dev *pdev)
{
	return (struct zpci_dev *) pdev->sysdata;
}
struct zpci_dev *get_zdev_by_fid(u32 fid)
{
	struct zpci_dev *tmp, *zdev = NULL;

	mutex_lock(&zpci_list_lock);
	list_for_each_entry(tmp, &zpci_list, entry) {
		if (tmp->fid == fid) {
			zdev = tmp;
			break;
		}
	}
	mutex_unlock(&zpci_list_lock);
	return zdev;
}
bool zpci_fid_present(u32 fid)
{
	return get_zdev_by_fid(fid) != NULL;
}

static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus)
{
	return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL;
}

int pci_domain_nr(struct pci_bus *bus)
{
	return ((struct zpci_dev *) bus->sysdata)->domain;
}
EXPORT_SYMBOL_GPL(pci_domain_nr);

int pci_proc_domain(struct pci_bus *bus)
{
	return pci_domain_nr(bus);
}
EXPORT_SYMBOL_GPL(pci_proc_domain);
/* Store PCI function information block */
static int zpci_store_fib(struct zpci_dev *zdev, u8 *fc)
{
	struct zpci_fib *fib;
	u8 status, cc;

	fib = (void *) get_zeroed_page(GFP_KERNEL);
	if (!fib)
		return -ENOMEM;

	do {
		cc = __stpcifc(zdev->fh, 0, fib, &status);
		if (cc == 2) {
			msleep(ZPCI_INSN_BUSY_DELAY);
			memset(fib, 0, PAGE_SIZE);
		}
	} while (cc == 2);

	if (cc)
		pr_err_once("%s: cc: %u  status: %u\n",
			    __func__, cc, status);

	/* Return PCI function controls */
	*fc = fib->fc;
	free_page((unsigned long) fib);
	return (cc) ? -EIO : 0;
}
/* Modify PCI: Register adapter interruptions */
static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb,
			      u64 aibv)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
	struct zpci_fib *fib;
	int rc;

	fib = (void *) get_zeroed_page(GFP_KERNEL);
	if (!fib)
		return -ENOMEM;

	fib->noi = zdev->irq_map->msi_vecs;
	fib->sum = 1;		/* enable summary notifications */
	fib->aibv = aibv;
	fib->aibvo = 0;		/* every function has its own page */
	fib->aisb = (u64) bucket->aisb + aisb / 8;
	fib->aisbo = aisb & ZPCI_MSI_MASK;

	rc = mpcifc_instr(req, fib);
	pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi);

	free_page((unsigned long) fib);
	return rc;
}
struct mod_pci_args {
	u64 base;
	u64 limit;
	u64 iota;
	u64 fmb_addr;
};

static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args *args)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, fn);
	struct zpci_fib *fib;
	int rc;

	/* The FIB must be available even if it's not used */
	fib = (void *) get_zeroed_page(GFP_KERNEL);
	if (!fib)
		return -ENOMEM;

	fib->pba = args->base;
	fib->pal = args->limit;
	fib->iota = args->iota;
	fib->fmb_addr = args->fmb_addr;

	rc = mpcifc_instr(req, fib);
	free_page((unsigned long) fib);
	return rc;
}
/* Modify PCI: Register I/O address translation parameters */
int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
		       u64 base, u64 limit, u64 iota)
{
	struct mod_pci_args args = { base, limit, iota, 0 };

	WARN_ON_ONCE(iota & 0x3fff);
	args.iota |= ZPCI_IOTA_RTTO_FLAG;
	return mod_pci(zdev, ZPCI_MOD_FC_REG_IOAT, dmaas, &args);
}
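/*
 * Sketch (an assumption about the caller, not code from this file): the DMA
 * setup code is expected to register its root translation table roughly as
 *
 *	rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
 *				(u64) zdev->dma_table);
 *
 * where dma_table is the 16K-aligned root of the I/O translation tables
 * (hence the WARN_ON_ONCE on the low 14 bits above) and ZPCI_IOTA_RTTO_FLAG
 * marks the address as a region-third-table origin.
 */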
/* Modify PCI: Unregister I/O address translation parameters */
int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
{
	struct mod_pci_args args = { 0, 0, 0, 0 };

	return mod_pci(zdev, ZPCI_MOD_FC_DEREG_IOAT, dmaas, &args);
}

/* Modify PCI: Unregister adapter interruptions */
static int zpci_unregister_airq(struct zpci_dev *zdev)
{
	struct mod_pci_args args = { 0, 0, 0, 0 };

	return mod_pci(zdev, ZPCI_MOD_FC_DEREG_INT, 0, &args);
}
/* Modify PCI: Set PCI function measurement parameters */
int zpci_fmb_enable_device(struct zpci_dev *zdev)
{
	struct mod_pci_args args = { 0, 0, 0, 0 };

	zdev->fmb = kmem_cache_alloc(zdev_fmb_cache, GFP_KERNEL);
	if (!zdev->fmb)
		return -ENOMEM;
	memset(zdev->fmb, 0, sizeof(*zdev->fmb));
	WARN_ON((u64) zdev->fmb & 0xf);

	args.fmb_addr = virt_to_phys(zdev->fmb);
	return mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args);
}
/* Modify PCI: Disable PCI function measurement */
int zpci_fmb_disable_device(struct zpci_dev *zdev)
{
	struct mod_pci_args args = { 0, 0, 0, 0 };
	int rc;

	/* Function measurement is disabled if fmb address is zero */
	rc = mod_pci(zdev, ZPCI_MOD_FC_SET_MEASURE, 0, &args);

	kmem_cache_free(zdev_fmb_cache, zdev->fmb);
	zdev->fmb = NULL;
	return rc;
}
#define ZPCI_PCIAS_CFGSPC	15

static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data;
	int rc;

	rc = pcilg_instr(&data, req, offset);
	data = data << ((8 - len) * 8);
	data = le64_to_cpu(data);
	*val = rc ? 0xffffffff : (u32) data;
	return rc;
}

static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
{
	u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
	u64 data = val;
	int rc;

	data = cpu_to_le64(data);
	data = data >> ((8 - len) * 8);
	rc = pcistg_instr(data, req, offset);
	return rc;
}
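/*
 * Worked example (illustrative; it assumes pcilg_instr returns the bytes
 * read from config space right-aligned with the lowest-addressed byte in
 * the most significant position of the group): a 2-byte load of the
 * little-endian config value 0x1234 (bytes 0x34, 0x12) arrives as
 * data == 0x3412.  Shifting left by (8 - 2) * 8 = 48 bits gives
 * 0x3412000000000000, and le64_to_cpu() on big-endian s390 byte-swaps that
 * to 0x1234, which is what ends up in *val.  The store path applies the
 * same two steps in reverse order.
 */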
void synchronize_irq(unsigned int irq)
{
	/*
	 * Not needed, the handler is protected by a lock and IRQs that occur
	 * after the handler is deleted are just NOPs.
	 */
}
EXPORT_SYMBOL_GPL(synchronize_irq);

void enable_irq(unsigned int irq)
{
	struct msi_desc *msi = irq_get_msi_desc(irq);

	zpci_msi_set_mask_bits(msi, 1, 0);
}
EXPORT_SYMBOL_GPL(enable_irq);

void disable_irq(unsigned int irq)
{
	struct msi_desc *msi = irq_get_msi_desc(irq);

	zpci_msi_set_mask_bits(msi, 1, 1);
}
EXPORT_SYMBOL_GPL(disable_irq);
void disable_irq_nosync(unsigned int irq)
{
	disable_irq(irq);
}
EXPORT_SYMBOL_GPL(disable_irq_nosync);

unsigned long probe_irq_on(void)
{
	return 0;
}
EXPORT_SYMBOL_GPL(probe_irq_on);

int probe_irq_off(unsigned long val)
{
	return 0;
}
EXPORT_SYMBOL_GPL(probe_irq_off);

unsigned int probe_irq_mask(unsigned long val)
{
	return val;
}
EXPORT_SYMBOL_GPL(probe_irq_mask);
void __devinit pcibios_fixup_bus(struct pci_bus *bus)
{
}

resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				       resource_size_t size,
				       resource_size_t align)
{
	return 0;
}

/* combine single writes by using store-block insn */
void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
	zpci_memcpy_toio(to, from, count);
}
/* Create a virtual mapping cookie for a PCI BAR */
void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max)
{
	struct zpci_dev *zdev = get_zdev(pdev);
	u64 addr;
	int idx;

	if ((bar & 7) != bar)
		return NULL;

	idx = zdev->bars[bar].map_idx;
	spin_lock(&zpci_iomap_lock);
	zpci_iomap_start[idx].fh = zdev->fh;
	zpci_iomap_start[idx].bar = bar;
	spin_unlock(&zpci_iomap_lock);

	addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
	return (void __iomem *) addr;
}
EXPORT_SYMBOL_GPL(pci_iomap);
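/*
 * Illustration (not part of the original code): the cookie returned by
 * pci_iomap() is not a CPU mapping but an encoded handle.  Iomap entry 5,
 * for instance, yields ZPCI_IOMAP_ADDR_BASE | (5UL << 48); the index in
 * bits 48 and up selects the function handle and BAR number stored in
 * zpci_iomap_start[], while the low 48 bits are assumed to carry the
 * offset within the BAR that the zpci I/O access helpers add later.
 */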
void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
{
	unsigned int idx;

	idx = (((__force u64) addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48;
	spin_lock(&zpci_iomap_lock);
	zpci_iomap_start[idx].fh = 0;
	zpci_iomap_start[idx].bar = 0;
	spin_unlock(&zpci_iomap_lock);
}
EXPORT_SYMBOL_GPL(pci_iounmap);
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);

	if (!zdev || devfn != ZPCI_DEVFN)
		return 0;
	return zpci_cfg_load(zdev, where, val, size);
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 val)
{
	struct zpci_dev *zdev = get_zdev_by_bus(bus);

	if (!zdev || devfn != ZPCI_DEVFN)
		return 0;
	return zpci_cfg_store(zdev, where, val, size);
}

static struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};
/* store the last handled bit to implement fair scheduling of devices */
static DEFINE_PER_CPU(unsigned long, next_sbit);

static void zpci_irq_handler(void *dont, void *need)
{
	unsigned long sbit, mbit, last = 0, start = __get_cpu_var(next_sbit);
	int rescan = 0, max = aisb_max;
	struct zdev_irq_map *imap;

	kstat_cpu(smp_processor_id()).irqs[IOINT_PCI]++;
	sbit = start;

scan:
	/* find summary bit */
	for_each_set_bit_left_cont(sbit, bucket->aisb, max) {
		clear_bit(63 - (sbit & 63), bucket->aisb + (sbit >> 6));
		last = sbit;

		/* find vector bit */
		imap = bucket->imap[sbit];
		for_each_set_bit_left(mbit, &imap->aibv, imap->msi_vecs) {
			kstat_cpu(smp_processor_id()).irqs[IOINT_MSI]++;
			clear_bit(63 - mbit, &imap->aibv);

			spin_lock(&imap->lock);
			if (imap->cb[mbit].handler)
				imap->cb[mbit].handler(mbit,
					imap->cb[mbit].data);
			spin_unlock(&imap->lock);
		}
	}

	if (rescan)
		goto out;

	/* scan the skipped bits */
	if (start > 0) {
		sbit = 0;
		max = start;
		start = 0;
		goto scan;
	}

	/* enable interrupts again */
	sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);

	/* check again to not lose initiative */
	rmb();
	max = aisb_max;
	sbit = find_first_bit_left(bucket->aisb, max);
	if (sbit != max) {
		atomic_inc(&irq_retries);
		rescan++;
		goto scan;
	}
out:
	/* store next device bit to scan */
	__get_cpu_var(next_sbit) = (++last >= aisb_max) ? 0 : last;
}
/* msi_vecs - number of requested interrupts, 0 places the function into error state */
static int zpci_setup_msi(struct pci_dev *pdev, int msi_vecs)
{
	struct zpci_dev *zdev = get_zdev(pdev);
	unsigned int aisb, msi_nr;
	struct msi_desc *msi;
	int rc;

	/* store the number of used MSI vectors */
	zdev->irq_map->msi_vecs = min(msi_vecs, ZPCI_NR_MSI_VECS);

	spin_lock(&bucket->lock);
	aisb = find_first_zero_bit(bucket->alloc, PAGE_SIZE);
	/* alloc map exhausted? */
	if (aisb == PAGE_SIZE) {
		spin_unlock(&bucket->lock);
		return -EIO;
	}
	set_bit(aisb, bucket->alloc);
	spin_unlock(&bucket->lock);

	zdev->aisb = aisb;
	if (aisb + 1 > aisb_max)
		aisb_max = aisb + 1;

	/* wire up IRQ shortcut pointer */
	bucket->imap[zdev->aisb] = zdev->irq_map;
	pr_debug("%s: imap[%u] linked to %p\n", __func__, zdev->aisb, zdev->irq_map);

	/* TODO: irq number 0 won't be found if we return less than requested MSIs.
	 * ignore it for now and fix in common code.
	 */
	msi_nr = aisb << ZPCI_MSI_VEC_BITS;

	list_for_each_entry(msi, &pdev->msi_list, list) {
		rc = zpci_setup_msi_irq(zdev, msi, msi_nr,
					aisb << ZPCI_MSI_VEC_BITS);
		if (rc)
			return rc;
		msi_nr++;
	}

	rc = zpci_register_airq(zdev, aisb, (u64) &zdev->irq_map->aibv);
	if (rc) {
		clear_bit(aisb, bucket->alloc);
		dev_err(&pdev->dev, "register MSI failed with: %d\n", rc);
		return rc;
	}
	return (zdev->irq_map->msi_vecs == msi_vecs) ?
		0 : zdev->irq_map->msi_vecs;
}
static void zpci_teardown_msi(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = get_zdev(pdev);
	struct msi_desc *msi;
	int aisb, rc;

	rc = zpci_unregister_airq(zdev);
	if (rc) {
		dev_err(&pdev->dev, "deregister MSI failed with: %d\n", rc);
		return;
	}

	msi = list_first_entry(&pdev->msi_list, struct msi_desc, list);
	aisb = irq_to_dev_nr(msi->irq);

	list_for_each_entry(msi, &pdev->msi_list, list)
		zpci_teardown_msi_irq(zdev, msi);

	clear_bit(aisb, bucket->alloc);
	if (aisb + 1 == aisb_max)
		aisb_max--;
}
int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	pr_debug("%s: requesting %d MSI interrupts...", __func__, nvec);
	if (type != PCI_CAP_ID_MSIX && type != PCI_CAP_ID_MSI)
		return -EINVAL;
	return zpci_setup_msi(pdev, nvec);
}

void arch_teardown_msi_irqs(struct pci_dev *pdev)
{
	pr_info("%s: on pdev: %p\n", __func__, pdev);
	zpci_teardown_msi(pdev);
}
static void zpci_map_resources(struct zpci_dev *zdev)
{
	struct pci_dev *pdev = zdev->pdev;
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pdev->resource[i].start = (resource_size_t) pci_iomap(pdev, i, 0);
		pdev->resource[i].end = pdev->resource[i].start + len - 1;
		pr_debug("BAR%i: -> start: %Lx  end: %Lx\n",
			 i, pdev->resource[i].start, pdev->resource[i].end);
	}
}

static void zpci_unmap_resources(struct pci_dev *pdev)
{
	resource_size_t len;
	int i;

	for (i = 0; i < PCI_BAR_COUNT; i++) {
		len = pci_resource_len(pdev, i);
		if (!len)
			continue;
		pci_iounmap(pdev, (void *) pdev->resource[i].start);
	}
}
struct zpci_dev *zpci_alloc_device(void)
{
	struct zpci_dev *zdev;

	/* Alloc memory for our private pci device data */
	zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
	if (!zdev)
		return ERR_PTR(-ENOMEM);

	/* Alloc aibv & callback space */
	zdev->irq_map = kmem_cache_zalloc(zdev_irq_cache, GFP_KERNEL);
	if (!zdev->irq_map) {
		kfree(zdev);
		return ERR_PTR(-ENOMEM);
	}
	WARN_ON((u64) zdev->irq_map & 0xff);
	return zdev;
}

void zpci_free_device(struct zpci_dev *zdev)
{
	kmem_cache_free(zdev_irq_cache, zdev->irq_map);
	kfree(zdev);
}
/* Called on removal of pci_dev, leaves zpci and bus device */
static void zpci_remove_device(struct pci_dev *pdev)
{
	struct zpci_dev *zdev = get_zdev(pdev);

	dev_info(&pdev->dev, "Removing device %u\n", zdev->domain);
	zdev->state = ZPCI_FN_STATE_CONFIGURED;
	zpci_dma_exit_device(zdev);
	zpci_fmb_disable_device(zdev);
	zpci_sysfs_remove_device(&pdev->dev);
	zpci_unmap_resources(pdev);
	list_del(&zdev->entry);		/* can be called from init */
}
static void zpci_scan_devices(void)
{
	struct zpci_dev *zdev;

	mutex_lock(&zpci_list_lock);
	list_for_each_entry(zdev, &zpci_list, entry)
		if (zdev->state == ZPCI_FN_STATE_CONFIGURED)
			zpci_scan_device(zdev);
	mutex_unlock(&zpci_list_lock);
}

/*
 * Too late for any s390-specific setup: interrupts must already be set up
 * (which in turn requires DMA setup), and the PCI scan accesses the config
 * space, which only works if the function handle is enabled.
 */
int pcibios_enable_device(struct pci_dev *pdev, int mask)
{
	struct resource *res;
	u16 cmd;
	int i;

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	for (i = 0; i < PCI_BAR_COUNT; i++) {
		res = &pdev->resource[i];

		/* I/O port resources are not supported on System z */
		if (res->flags & IORESOURCE_IO)
			return -EINVAL;
		if (res->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}
	pci_write_config_word(pdev, PCI_COMMAND, cmd);
	return 0;
}

void pcibios_disable_device(struct pci_dev *pdev)
{
	zpci_remove_device(pdev);
	pdev->sysdata = NULL;
}

int pcibios_add_platform_entries(struct pci_dev *pdev)
{
	return zpci_sysfs_add_device(&pdev->dev);
}
int zpci_request_irq(unsigned int irq, irq_handler_t handler, void *data)
{
	int msi_nr = irq_to_msi_nr(irq);
	struct zdev_irq_map *imap;
	struct msi_desc *msi;

	msi = irq_get_msi_desc(irq);
	if (!msi)
		return -EIO;

	imap = get_imap(irq);
	spin_lock_init(&imap->lock);

	pr_debug("%s: register handler for IRQ:MSI %d:%d\n", __func__, irq >> 6, msi_nr);
	imap->cb[msi_nr].handler = handler;
	imap->cb[msi_nr].data = data;

	/*
	 * The generic MSI code returns with the interrupt disabled on the
	 * card, using the MSI mask bits. Firmware doesn't appear to unmask
	 * at that level, so we do it here by hand.
	 */
	zpci_msi_set_mask_bits(msi, 1, 0);
	return 0;
}

void zpci_free_irq(unsigned int irq)
{
	struct zdev_irq_map *imap = get_imap(irq);
	int msi_nr = irq_to_msi_nr(irq);
	unsigned long flags;

	pr_debug("%s: for irq: %d\n", __func__, irq);

	spin_lock_irqsave(&imap->lock, flags);
	imap->cb[msi_nr].handler = NULL;
	imap->cb[msi_nr].data = NULL;
	spin_unlock_irqrestore(&imap->lock, flags);
}
int request_irq(unsigned int irq, irq_handler_t handler,
		unsigned long irqflags, const char *devname, void *dev_id)
{
	pr_debug("%s: irq: %d  handler: %p  flags: %lx  dev: %s\n",
		 __func__, irq, handler, irqflags, devname);

	return zpci_request_irq(irq, handler, dev_id);
}
EXPORT_SYMBOL_GPL(request_irq);

void free_irq(unsigned int irq, void *dev_id)
{
	zpci_free_irq(irq);
}
EXPORT_SYMBOL_GPL(free_irq);
static int __init zpci_irq_init(void)
{
	int cpu, rc;

	bucket = kzalloc(sizeof(*bucket), GFP_KERNEL);
	if (!bucket)
		return -ENOMEM;

	bucket->aisb = (unsigned long *) get_zeroed_page(GFP_KERNEL);
	if (!bucket->aisb) {
		rc = -ENOMEM;
		goto out_aisb;
	}

	bucket->alloc = (unsigned long *) get_zeroed_page(GFP_KERNEL);
	if (!bucket->alloc) {
		rc = -ENOMEM;
		goto out_alloc;
	}

	isc_register(PCI_ISC);
	zpci_irq_si = s390_register_adapter_interrupt(&zpci_irq_handler, NULL, PCI_ISC);
	if (IS_ERR(zpci_irq_si)) {
		rc = PTR_ERR(zpci_irq_si);
		zpci_irq_si = NULL;
		goto out_ai;
	}

	for_each_online_cpu(cpu)
		per_cpu(next_sbit, cpu) = 0;

	spin_lock_init(&bucket->lock);
	/* set summary to 1 to be called every time for the ISC */
	*zpci_irq_si = 1;
	sic_instr(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
	return 0;

out_ai:
	isc_unregister(PCI_ISC);
	free_page((unsigned long) bucket->alloc);
out_alloc:
	free_page((unsigned long) bucket->aisb);
out_aisb:
	kfree(bucket);
	return rc;
}

static void zpci_irq_exit(void)
{
	free_page((unsigned long) bucket->alloc);
	free_page((unsigned long) bucket->aisb);
	s390_unregister_adapter_interrupt(zpci_irq_si, PCI_ISC);
	isc_unregister(PCI_ISC);
	kfree(bucket);
}
void zpci_debug_info(struct zpci_dev *zdev, struct seq_file *m)
{
	seq_printf(m, "global irq retries: %u\n", atomic_read(&irq_retries));
	seq_printf(m, "aibv[0]:%016lx  aibv[1]:%016lx  aisb:%016lx\n",
		   get_imap(0)->aibv, get_imap(1)->aibv, *bucket->aisb);
}
static struct resource *zpci_alloc_bus_resource(unsigned long start, unsigned long size,
						unsigned long flags, int domain)
{
	struct resource *r;
	char *name;
	int rc;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return ERR_PTR(-ENOMEM);
	r->start = start;
	r->end = r->start + size - 1;
	r->flags = flags;
	r->parent = &iomem_resource;
	name = kmalloc(18, GFP_KERNEL);
	if (!name) {
		kfree(r);
		return ERR_PTR(-ENOMEM);
	}
	sprintf(name, "PCI Bus: %04x:%02x", domain, ZPCI_BUS_NR);
	r->name = name;

	rc = request_resource(&iomem_resource, r);
	if (rc)
		pr_debug("request resource %pR failed\n", r);
	return r;
}
static int zpci_alloc_iomap(struct zpci_dev *zdev)
{
	int entry;

	spin_lock(&zpci_iomap_lock);
	entry = find_first_zero_bit(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
	if (entry == ZPCI_IOMAP_MAX_ENTRIES) {
		spin_unlock(&zpci_iomap_lock);
		return -ENOSPC;
	}
	set_bit(entry, zpci_iomap);
	spin_unlock(&zpci_iomap_lock);
	return entry;
}

static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
{
	spin_lock(&zpci_iomap_lock);
	memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
	clear_bit(entry, zpci_iomap);
	spin_unlock(&zpci_iomap_lock);
}
static int zpci_create_device_bus(struct zpci_dev *zdev)
{
	struct resource *res;
	LIST_HEAD(resources);
	int i, entry;

	/* allocate mapping entry for each used bar */
	for (i = 0; i < PCI_BAR_COUNT; i++) {
		unsigned long addr, size, flags;

		if (!zdev->bars[i].size)
			continue;
		entry = zpci_alloc_iomap(zdev);
		if (entry < 0)
			return entry;
		zdev->bars[i].map_idx = entry;

		/* only MMIO is supported */
		flags = IORESOURCE_MEM;
		if (zdev->bars[i].val & 8)
			flags |= IORESOURCE_PREFETCH;
		if (zdev->bars[i].val & 4)
			flags |= IORESOURCE_MEM_64;

		addr = ZPCI_IOMAP_ADDR_BASE + ((u64) entry << 48);
		size = 1UL << zdev->bars[i].size;

		res = zpci_alloc_bus_resource(addr, size, flags, zdev->domain);
		if (IS_ERR(res)) {
			zpci_free_iomap(zdev, entry);
			return PTR_ERR(res);
		}
		pci_add_resource(&resources, res);
	}

	zdev->bus = pci_create_root_bus(NULL, ZPCI_BUS_NR, &pci_root_ops,
					zdev, &resources);
	if (!zdev->bus)
		return -EIO;

	zdev->bus->max_bus_speed = zdev->max_bus_speed;
	return 0;
}
static int zpci_alloc_domain(struct zpci_dev *zdev)
{
	spin_lock(&zpci_domain_lock);
	zdev->domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
	if (zdev->domain == ZPCI_NR_DEVICES) {
		spin_unlock(&zpci_domain_lock);
		return -ENOSPC;
	}
	set_bit(zdev->domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
	return 0;
}

static void zpci_free_domain(struct zpci_dev *zdev)
{
	spin_lock(&zpci_domain_lock);
	clear_bit(zdev->domain, zpci_domain);
	spin_unlock(&zpci_domain_lock);
}
int zpci_enable_device(struct zpci_dev *zdev)
{
	int rc;

	rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
	if (rc)
		return rc;
	pr_info("Enabled fh: 0x%x fid: 0x%x\n", zdev->fh, zdev->fid);

	rc = zpci_dma_init_device(zdev);
	if (rc)
		clp_disable_fh(zdev);
	return rc;
}
EXPORT_SYMBOL_GPL(zpci_enable_device);
int zpci_create_device(struct zpci_dev *zdev)
{
	int rc;

	rc = zpci_alloc_domain(zdev);
	if (rc)
		goto out;

	rc = zpci_create_device_bus(zdev);
	if (rc)
		goto out_bus;

	mutex_lock(&zpci_list_lock);
	list_add_tail(&zdev->entry, &zpci_list);
	if (hotplug_ops.create_slot)
		hotplug_ops.create_slot(zdev);
	mutex_unlock(&zpci_list_lock);

	if (zdev->state == ZPCI_FN_STATE_STANDBY)
		return 0;

	rc = zpci_enable_device(zdev);
	if (rc)
		goto out_start;
	return 0;

out_start:
	mutex_lock(&zpci_list_lock);
	list_del(&zdev->entry);
	if (hotplug_ops.remove_slot)
		hotplug_ops.remove_slot(zdev);
	mutex_unlock(&zpci_list_lock);
out_bus:
	zpci_free_domain(zdev);
out:
	return rc;
}
void zpci_stop_device(struct zpci_dev *zdev)
{
	zpci_dma_exit_device(zdev);
	/*
	 * Note: SCLP disables fh via set-pci-fn, so don't
	 * disable it here.
	 */
}
EXPORT_SYMBOL_GPL(zpci_stop_device);
int zpci_scan_device(struct zpci_dev *zdev)
{
	zdev->pdev = pci_scan_single_device(zdev->bus, ZPCI_DEVFN);
	if (!zdev->pdev) {
		pr_err("pci_scan_single_device failed for fid: 0x%x\n",
		       zdev->fid);
		goto out;
	}

	zpci_debug_init_device(zdev);
	zpci_fmb_enable_device(zdev);
	zpci_map_resources(zdev);
	pci_bus_add_devices(zdev->bus);

	/* now that pdev was added to the bus mark it as used */
	zdev->state = ZPCI_FN_STATE_ONLINE;
	return 0;

out:
	zpci_dma_exit_device(zdev);
	clp_disable_fh(zdev);
	return -EIO;
}
EXPORT_SYMBOL_GPL(zpci_scan_device);
static inline int barsize(u8 size)
{
	return (size) ? (1 << size) >> 10 : 0;
}
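/*
 * Worked example (illustrative): BAR sizes are kept as a power-of-two
 * exponent in zdev->bars[i].size, presumably filled in by the CLP device
 * listing code.  A size field of 16 describes a 2^16 = 64 KiB BAR, so
 * barsize(16) returns (1 << 16) >> 10 = 64 (KiB), and
 * zpci_create_device_bus() computes the resource length the same way,
 * as 1UL << 16.  A size field of 0 means the BAR is unused.
 */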
static int zpci_mem_init(void)
{
	zdev_irq_cache = kmem_cache_create("PCI_IRQ_cache", sizeof(struct zdev_irq_map),
				L1_CACHE_BYTES, SLAB_HWCACHE_ALIGN, NULL);
	if (!zdev_irq_cache)
		goto error_zdev;

	zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
				16, 0, NULL);
	if (!zdev_fmb_cache)
		goto error_fmb;

	/* TODO: use realloc */
	zpci_iomap_start = kzalloc(ZPCI_IOMAP_MAX_ENTRIES * sizeof(*zpci_iomap_start),
				   GFP_KERNEL);
	if (!zpci_iomap_start)
		goto error_iomap;
	return 0;

error_iomap:
	kmem_cache_destroy(zdev_fmb_cache);
error_fmb:
	kmem_cache_destroy(zdev_irq_cache);
error_zdev:
	return -ENOMEM;
}

static void zpci_mem_exit(void)
{
	kfree(zpci_iomap_start);
	kmem_cache_destroy(zdev_irq_cache);
	kmem_cache_destroy(zdev_fmb_cache);
}
unsigned int pci_probe = 1;
EXPORT_SYMBOL_GPL(pci_probe);

char * __init pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		pci_probe = 0;
		return NULL;
	}
	return str;
}
static int __init pci_base_init(void)
{
	int rc;

	if (!test_facility(2) || !test_facility(69)
	    || !test_facility(71) || !test_facility(72))
		return 0;

	pr_info("Probing PCI hardware: PCI:%d  SID:%d  AEN:%d\n",
		test_facility(69), test_facility(70),
		test_facility(71));

	rc = zpci_debug_init();
	if (rc)
		return rc;

	rc = zpci_mem_init();
	if (rc)
		goto out_mem;

	rc = zpci_msihash_init();
	if (rc)
		goto out_hash;

	rc = zpci_irq_init();
	if (rc)
		goto out_irq;

	rc = zpci_dma_init();
	if (rc)
		goto out_dma;

	rc = clp_find_pci_devices();
	if (rc)
		goto out_find;

	zpci_scan_devices();
	return 0;

out_find:
	zpci_dma_exit();
out_dma:
	zpci_irq_exit();
out_irq:
	zpci_msihash_exit();
out_hash:
	zpci_mem_exit();
out_mem:
	zpci_debug_exit();
	return rc;
}
subsys_initcall(pci_base_init);