[firefly-linux-kernel-4.4.55.git] arch/tile/kernel/pci-dma.c
index adc369d8c77bc0266c7443062da446688c6958fc..09b58703ac264a7218e2f4586abd7e1f4d59f82a 100644
--- a/arch/tile/kernel/pci-dma.c
+++ b/arch/tile/kernel/pci-dma.c
@@ -257,7 +257,7 @@ static void tile_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
        BUG_ON(!valid_dma_direction(direction));
 
        __dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)),
-                           dma_address & PAGE_OFFSET, size, direction);
+                           dma_address & (PAGE_SIZE - 1), size, direction);
 }
 
 static void tile_dma_sync_single_for_cpu(struct device *dev,
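
The hunk above (and the identical fix in the PCI path below) corrects the
in-page offset calculation: PAGE_OFFSET is the base address of the kernel's
direct mapping, not a bit mask, so "dma_address & PAGE_OFFSET" does not
extract the offset within a page; masking with PAGE_SIZE - 1 does. A minimal
standalone C sketch of the address split, using illustrative stand-in values
for PAGE_SIZE and PAGE_OFFSET (not the TILE-Gx ones):

/* Userspace illustration only; the constants are hypothetical stand-ins. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE   4096UL                  /* example page size */
#define PAGE_OFFSET 0xffff800000000000UL    /* example direct-map base, not a mask */

int main(void)
{
	uint64_t dma_address = 0x12345678abcUL;

	uint64_t offset = dma_address & (PAGE_SIZE - 1); /* correct: low bits of the address */
	uint64_t pfn    = dma_address / PAGE_SIZE;       /* PFN_DOWN() equivalent */
	uint64_t bogus  = dma_address & PAGE_OFFSET;     /* buggy: masks with a base address */

	printf("pfn=%#llx offset=%#llx bogus=%#llx\n",
	       (unsigned long long)pfn, (unsigned long long)offset,
	       (unsigned long long)bogus);
	return 0;
}
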
@@ -436,7 +436,7 @@ static void tile_pci_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
        dma_address -= get_dma_offset(dev);
 
        __dma_complete_page(pfn_to_page(PFN_DOWN(dma_address)),
-                           dma_address & PAGE_OFFSET, size, direction);
+                           dma_address & (PAGE_SIZE - 1), size, direction);
 }
 
 static void tile_pci_dma_sync_single_for_cpu(struct device *dev,
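
The PCI unmap path differs only in that it first removes the per-device DMA
offset (get_dma_offset(dev)), so the remaining value is a CPU physical address
before it is split into page and in-page offset. A small illustrative sketch of
that order of operations, with a hypothetical dma_offset standing in for
get_dma_offset():

/* Userspace illustration only; dma_offset is a hypothetical stand-in. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL   /* example page size */

int main(void)
{
	uint64_t dma_offset  = 0x40000000000UL;      /* stand-in for get_dma_offset(dev) */
	uint64_t dma_address = dma_offset + 0x1234abcUL;

	uint64_t phys   = dma_address - dma_offset;  /* back to a CPU physical address */
	uint64_t pfn    = phys / PAGE_SIZE;          /* PFN_DOWN(phys) */
	uint64_t offset = phys & (PAGE_SIZE - 1);    /* offset within that page */

	printf("phys=%#llx pfn=%#llx offset=%#llx\n",
	       (unsigned long long)phys, (unsigned long long)pfn,
	       (unsigned long long)offset);
	return 0;
}
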
@@ -588,15 +588,18 @@ int dma_set_coherent_mask(struct device *dev, u64 mask)
 {
        struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
-       /* Handle hybrid PCI devices with limited memory addressability. */
-       if ((dma_ops == gx_pci_dma_map_ops ||
-            dma_ops == gx_hybrid_pci_dma_map_ops ||
-            dma_ops == gx_legacy_pci_dma_map_ops) &&
-           (mask <= DMA_BIT_MASK(32))) {
-               if (dma_ops == gx_pci_dma_map_ops)
-                       set_dma_ops(dev, gx_hybrid_pci_dma_map_ops);
-
-               if (mask > dev->archdata.max_direct_dma_addr)
+       /*
+        * For PCI devices with 64-bit DMA addressing capability, promote
+        * the dma_ops to full capability for both streaming and consistent
+        * memory access. For 32-bit capable devices, limit the consistent
+        * memory DMA range to max_direct_dma_addr.
+        */
+       if (dma_ops == gx_pci_dma_map_ops ||
+           dma_ops == gx_hybrid_pci_dma_map_ops ||
+           dma_ops == gx_legacy_pci_dma_map_ops) {
+               if (mask == DMA_BIT_MASK(64))
+                       set_dma_ops(dev, gx_pci_dma_map_ops);
+               else if (mask > dev->archdata.max_direct_dma_addr)
                        mask = dev->archdata.max_direct_dma_addr;
        }
 
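
With this rework, a PCI device that sets a 64-bit coherent mask is switched to
the full-capability gx_pci_dma_map_ops, while a device requesting less keeps
its coherent mask clamped to max_direct_dma_addr. The driver-side counterpart
is the usual mask negotiation; a sketch of that common pattern is below
(my_pci_probe() and its device are hypothetical, not part of this patch):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Hypothetical probe fragment: request 64-bit DMA, fall back to 32-bit. */
static int my_pci_probe(struct pci_dev *pdev)
{
	int err;

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			return err;
		}
	}
	return 0;
}
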
@@ -607,3 +610,21 @@ int dma_set_coherent_mask(struct device *dev, u64 mask)
 }
 EXPORT_SYMBOL(dma_set_coherent_mask);
 #endif
+
+#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
+/*
+ * The generic dma_get_required_mask() uses the highest physical address
+ * (max_pfn) as a hint to PCI drivers for choosing between 32-bit and
+ * 64-bit DMA configuration. Since TILE-Gx has an I/O TLB/MMU, DMA is
+ * not limited by the physical memory space and can use the full 64-bit
+ * PCI address space. We therefore always report a 64-bit DMA mask here,
+ * letting PCI devices use 64-bit DMA whenever they are capable of it.
+ * A device driver can still fall back to 32-bit DMA if the device does
+ * not support 64-bit DMA.
+ */
+u64 dma_get_required_mask(struct device *dev)
+{
+       return DMA_BIT_MASK(64);
+}
+EXPORT_SYMBOL_GPL(dma_get_required_mask);
+#endif
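
Because this override always reports a 64-bit required mask, a driver that
consults dma_get_required_mask() on TILE-Gx is steered toward 64-bit
addressing regardless of how much physical memory is installed. A sketch of
such a consumer (my_setup_dma() is hypothetical, not part of this patch):

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical helper: pick a DMA mask based on what the platform reports. */
static int my_setup_dma(struct device *dev)
{
	u64 required = dma_get_required_mask(dev); /* DMA_BIT_MASK(64) on TILE-Gx */

	if (required > DMA_BIT_MASK(32))
		return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));

	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}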