Merge tag 'pci-v4.1-fixes-1' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 9b76b9d639e1c0e857bbc74e2643113081b68b2e..d2fe57dad433ea9409c52a3aaf8ae9efa2113032 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -15,7 +15,6 @@
 #include <linux/export.h>
 #include <linux/log2.h>
 #include <linux/of_device.h>
-#include <linux/hash.h>
 #include <linux/iommu-common.h>
 
 #include <asm/iommu.h>
@@ -30,7 +29,6 @@
 
 #define DRIVER_NAME    "pci_sun4v"
 #define PFX            DRIVER_NAME ": "
-static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);
 
 static unsigned long vpci_major = 1;
 static unsigned long vpci_minor = 1;
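The hunks below move this driver off its private per-cpu pool hash and
iommu_tbl_ops table and onto the generic allocator in lib/iommu-common.c.
For orientation, the common entry points the new call sites target look
approximately like this; the signatures are reconstructed from the calls in
this diff, not quoted from the header, so treat them as a sketch:

    /* Assumed lib/iommu-common.c API; argument names are illustrative. */
    unsigned long iommu_tbl_range_alloc(struct device *dev,
                                        struct iommu_map_table *iommu,
                                        unsigned long npages,
                                        unsigned long *handle,
                                        unsigned long mask,
                                        unsigned int align_order);

    void iommu_tbl_range_free(struct iommu_map_table *iommu, u64 dma_addr,
                              unsigned long npages, unsigned long entry);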
@@ -159,13 +157,12 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
        iommu = dev->archdata.iommu;
 
        entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
-                                     __this_cpu_read(iommu_pool_hash));
+                                     (unsigned long)(-1), 0);
 
        if (unlikely(entry == DMA_ERROR_CODE))
                goto range_alloc_fail;
 
-       *dma_addrp = (iommu->tbl.page_table_map_base +
-                     (entry << IO_PAGE_SHIFT));
+       *dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
        ret = (void *) first_page;
        first_page = __pa(first_page);
 
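Both the coherent and streaming paths now derive the DMA address from the
renamed table_map_base field. A pair of hypothetical helpers (they do not
exist in this file) makes the arithmetic explicit; IO_PAGE_SHIFT is 13 on
sparc64, so entry 3 in a table based at 0x80000000 would map to 0x80006000:

    /* Hypothetical helpers, for illustration only. */
    static inline dma_addr_t entry_to_dma(struct iommu_map_table *tbl,
                                          unsigned long entry)
    {
            return tbl->table_map_base + (entry << IO_PAGE_SHIFT);
    }

    static inline unsigned long dma_to_entry(struct iommu_map_table *tbl,
                                             dma_addr_t dvma)
    {
            return (dvma - tbl->table_map_base) >> IO_PAGE_SHIFT;
    }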
@@ -190,7 +187,7 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
        return ret;
 
 iommu_map_fail:
-       iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, false, NULL);
+       iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, DMA_ERROR_CODE);
 
 range_alloc_fail:
        free_pages(first_page, order);
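Note the new final argument to iommu_tbl_range_free(): it is an entry index
replacing the old boolean-and-cookie pair. Every call site in this file
passes DMA_ERROR_CODE, which the common code is assumed to treat as "entry
unknown, compute it from dma_addr and table_map_base", i.e. the same
arithmetic as the hypothetical dma_to_entry() sketched above. A caller that
already knows the entry could pass it and skip that subtraction.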
@@ -227,9 +224,9 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
        iommu = dev->archdata.iommu;
        pbm = dev->archdata.host_controller;
        devhandle = pbm->devhandle;
-       entry = ((dvma - iommu->tbl.page_table_map_base) >> IO_PAGE_SHIFT);
+       entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
        dma_4v_iommu_demap(&devhandle, entry, npages);
-       iommu_tbl_range_free(&iommu->tbl, dvma, npages, false, NULL);
+       iommu_tbl_range_free(&iommu->tbl, dvma, npages, DMA_ERROR_CODE);
        order = get_order(size);
        if (order < 10)
                free_pages((unsigned long)cpu, order);
@@ -257,13 +254,12 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
        npages >>= IO_PAGE_SHIFT;
 
        entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
-                                     __this_cpu_read(iommu_pool_hash));
+                                     (unsigned long)(-1), 0);
 
        if (unlikely(entry == DMA_ERROR_CODE))
                goto bad;
 
-       bus_addr = (iommu->tbl.page_table_map_base +
-                   (entry << IO_PAGE_SHIFT));
+       bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
        ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
        base_paddr = __pa(oaddr & IO_PAGE_MASK);
        prot = HV_PCI_MAP_ATTR_READ;
@@ -292,7 +288,7 @@ bad:
        return DMA_ERROR_CODE;
 
 iommu_map_fail:
-       iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, false, NULL);
+       iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE);
        return DMA_ERROR_CODE;
 }
 
@@ -319,9 +315,9 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
        npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
        npages >>= IO_PAGE_SHIFT;
        bus_addr &= IO_PAGE_MASK;
-       entry = (bus_addr - iommu->tbl.page_table_map_base) >> IO_PAGE_SHIFT;
+       entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT;
        dma_4v_iommu_demap(&devhandle, entry, npages);
-       iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, false, NULL);
+       iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, DMA_ERROR_CODE);
 }
 
 static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
@@ -363,7 +359,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
        max_seg_size = dma_get_max_seg_size(dev);
        seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
                                  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
-       base_shift = iommu->tbl.page_table_map_base >> IO_PAGE_SHIFT;
+       base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
        for_each_sg(sglist, s, nelems, i) {
                unsigned long paddr, npages, entry, out_entry = 0, slen;
 
@@ -376,8 +372,8 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
                /* Allocate iommu entries for that segment */
                paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
                npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
-               entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, &handle,
-                                     __this_cpu_read(iommu_pool_hash));
+               entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
+                                             &handle, (unsigned long)(-1), 0);
 
                /* Handle failure */
                if (unlikely(entry == DMA_ERROR_CODE)) {
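Unlike the single-page paths, the scatterlist path passes &handle rather
than NULL. The handle is assumed to carry the allocator's position hint
across the per-segment allocations of one request, so consecutive segments
tend to land in the same pool. The mask of (unsigned long)(-1) and
align_order of 0 match the other call sites: no addressing restriction and
no extra alignment.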
@@ -390,8 +386,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
                iommu_batch_new_entry(entry);
 
                /* Convert entry to a dma_addr_t */
-               dma_addr = iommu->tbl.page_table_map_base +
-                       (entry << IO_PAGE_SHIFT);
+               dma_addr = iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT);
                dma_addr |= (s->offset & ~IO_PAGE_MASK);
 
                /* Insert into HW table */
@@ -456,7 +451,7 @@ iommu_map_failed:
                        npages = iommu_num_pages(s->dma_address, s->dma_length,
                                                 IO_PAGE_SIZE);
                        iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
-                                            false, NULL);
+                                            DMA_ERROR_CODE);
                        /* XXX demap? XXX */
                        s->dma_address = DMA_ERROR_CODE;
                        s->dma_length = 0;
@@ -492,16 +487,16 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
                dma_addr_t dma_handle = sg->dma_address;
                unsigned int len = sg->dma_length;
                unsigned long npages;
-               struct iommu_table *tbl = &iommu->tbl;
+               struct iommu_map_table *tbl = &iommu->tbl;
                unsigned long shift = IO_PAGE_SHIFT;
 
                if (!len)
                        break;
                npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
-               entry = ((dma_handle - tbl->page_table_map_base) >> shift);
+               entry = ((dma_handle - tbl->table_map_base) >> shift);
                dma_4v_iommu_demap(&devhandle, entry, npages);
                iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
-                                    false, NULL);
+                                    DMA_ERROR_CODE);
                sg = sg_next(sg);
        }
 
@@ -517,8 +512,6 @@ static struct dma_map_ops sun4v_dma_ops = {
        .unmap_sg                       = dma_4v_unmap_sg,
 };
 
-static struct iommu_tbl_ops dma_4v_iommu_ops;
-
 static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
 {
        struct property *prop;
@@ -533,7 +526,7 @@ static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
 }
 
 static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
-                                           struct iommu_table *iommu)
+                                           struct iommu_map_table *iommu)
 {
        struct iommu_pool *pool;
        unsigned long i, pool_nr, cnt = 0;
@@ -541,7 +534,7 @@ static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
 
        devhandle = pbm->devhandle;
        for (pool_nr = 0; pool_nr < iommu->nr_pools; pool_nr++) {
-               pool = &(iommu->arena_pool[pool_nr]);
+               pool = &(iommu->pools[pool_nr]);
                for (i = pool->start; i <= pool->end; i++) {
                        unsigned long ret, io_attrs, ra;
 
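probe_existing_entries() now iterates iommu->pools instead of the old
arena_pool array. The shape of the generic structures, as assumed by this
diff (only the fields referenced here are shown; the real definitions live
in include/linux/iommu-common.h):

    /* Sketch of the generic map-table types used by this file. */
    struct iommu_pool {
            unsigned long   start;  /* first entry owned by this pool */
            unsigned long   end;    /* last entry owned by this pool */
            unsigned long   hint;   /* next-fit search hint */
            spinlock_t      lock;   /* per-pool lock: the reason pools
                                     * exist at all */
    };

    struct iommu_map_table {
            unsigned long           table_map_base; /* base DMA address */
            unsigned long           nr_pools;
            struct iommu_pool       pools[16];      /* IOMMU_NR_POOLS */
            unsigned long           *map;           /* free-area bitmap */
            /* ... flags, table_shift, etc. ... */
    };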
@@ -587,8 +580,9 @@ static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
        dma_offset = vdma[0];
 
        /* Setup initial software IOMMU state. */
+       spin_lock_init(&iommu->lock);
        iommu->ctx_lowest_free = 1;
-       iommu->tbl.page_table_map_base = dma_offset;
+       iommu->tbl.table_map_base = dma_offset;
        iommu->dma_addr_mask = dma_mask;
 
        /* Allocate and initialize the free area map.  */
@@ -600,8 +594,9 @@ static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
                return -ENOMEM;
        }
        iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
-                           &dma_4v_iommu_ops, false /* no large_pool */,
-                           0 /* default npools */);
+                           NULL, false /* no large_pool */,
+                           0 /* default npools */,
+                           false /* want span boundary checking */);
        sz = probe_existing_entries(pbm, &iommu->tbl);
        if (sz)
                printk("%s: Imported %lu TSB entries from OBP\n",
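Relative to the old iommu_tbl_ops variant, the initializer takes a flush
callback and an extra boolean. Reconstructed from this call site, the
assumed signature is approximately:

    /* Assumed signature; parameter names are illustrative. */
    void iommu_tbl_pool_init(struct iommu_map_table *iommu,
                             unsigned long num_entries,
                             u32 table_shift,
                             void (*lazy_flush)(struct iommu_map_table *),
                             bool large_pool, u32 npools,
                             bool skip_span_boundary_check);

The NULL replaces the deleted dma_4v_iommu_ops table (this driver needs no
flush hook), and the trailing false keeps segment span-boundary checking
enabled, as the inline comment says.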
@@ -1001,17 +996,8 @@ static struct platform_driver pci_sun4v_driver = {
        .probe          = pci_sun4v_probe,
 };
 
-static void setup_iommu_pool_hash(void)
-{
-       unsigned int i;
-
-       for_each_possible_cpu(i)
-               per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);
-}
-
 static int __init pci_sun4v_init(void)
 {
-       setup_iommu_pool_hash();
        return platform_driver_register(&pci_sun4v_driver);
 }