powerpc: Implement dma_mmap_coherent()
author Benjamin Herrenschmidt <benh@kernel.crashing.org>
Thu, 24 Mar 2011 20:50:06 +0000 (20:50 +0000)
committer Benjamin Herrenschmidt <benh@kernel.crashing.org>
Tue, 29 Mar 2011 23:44:00 +0000 (10:44 +1100)
This is used by ALSA to mmap buffers allocated with dma_alloc_coherent()
into userspace. We need a special variant to handle machines with
non-coherent DMA, as those buffers have "special" virtual addresses and
require non-cacheable mappings.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
arch/powerpc/include/asm/dma-mapping.h
arch/powerpc/kernel/dma.c
arch/powerpc/mm/dma-noncoherent.c
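
For context, a minimal sketch (not part of this commit) of how a driver
could call the new helper from its mmap file operation; the my_dev
structure and its fields are hypothetical, standing in for state saved
from an earlier dma_alloc_coherent() call:

/* Hypothetical driver mmap handler: md->cpu_addr and md->dma_handle
 * were returned by dma_alloc_coherent(md->dev, ...) at probe time.
 */
static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_dev *md = file->private_data;

	return dma_mmap_coherent(md->dev, vma, md->cpu_addr,
				 md->dma_handle,
				 vma->vm_end - vma->vm_start);
}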

index 6d2416a857096be9548964987af7f701a5fe15c2..dd70fac57ec896253990fc1761fd3ad96fa6098a 100644 (file)
@@ -42,6 +42,7 @@ extern void __dma_free_coherent(size_t size, void *vaddr);
 extern void __dma_sync(void *vaddr, size_t size, int direction);
 extern void __dma_sync_page(struct page *page, unsigned long offset,
                                 size_t size, int direction);
+extern unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr);
 
 #else /* ! CONFIG_NOT_COHERENT_CACHE */
 /*
@@ -198,6 +199,11 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
 
+extern int dma_mmap_coherent(struct device *, struct vm_area_struct *,
+                            void *, dma_addr_t, size_t);
+#define ARCH_HAS_DMA_MMAP_COHERENT
+
+
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                enum dma_data_direction direction)
 {
index cf02cad62d9a78ecaae14161d7deb6cd78bdf768..d238c082c3c5fa3acaa3ec9e580e914117039c75 100644 (file)
@@ -179,3 +179,21 @@ static int __init dma_init(void)
        return 0;
 }
 fs_initcall(dma_init);
+
+int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+                     void *cpu_addr, dma_addr_t handle, size_t size)
+{
+       unsigned long pfn;
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+       vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+       pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
+#else
+       pfn = page_to_pfn(virt_to_page(cpu_addr));
+#endif
+       return remap_pfn_range(vma, vma->vm_start,
+                              pfn + vma->vm_pgoff,
+                              vma->vm_end - vma->vm_start,
+                              vma->vm_page_prot);
+}
+EXPORT_SYMBOL_GPL(dma_mmap_coherent);
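
With ARCH_HAS_DMA_MMAP_COHERENT now defined, generic code such as the
ALSA PCM core can map a coherent buffer into userspace in a single call
instead of faulting pages in one at a time. An illustrative sketch of
the rough shape of such a caller (not code from this commit):

/* Sketch of an ALSA-style data mmap callback. The substream fields
 * used here hold the device, CPU address and DMA handle of the
 * preallocated buffer; the non-coherent case is handled inside
 * dma_mmap_coherent() itself.
 */
static int pcm_mmap_data(struct snd_pcm_substream *substream,
			 struct vm_area_struct *area)
{
#ifdef ARCH_HAS_DMA_MMAP_COHERENT
	return dma_mmap_coherent(substream->dma_buffer.dev.dev, area,
				 substream->runtime->dma_area,
				 substream->runtime->dma_addr,
				 area->vm_end - area->vm_start);
#else
	return -ENXIO;		/* fall back to fault-based mmap */
#endif
}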
index 757c0bed9a91e5c76e40dbd2a50346f0bc397b10..b42f76c4948dba17b1d84efb32bb3f06790df1e0 100644 (file)
@@ -399,3 +399,23 @@ void __dma_sync_page(struct page *page, unsigned long offset,
 #endif
 }
 EXPORT_SYMBOL(__dma_sync_page);
+
+/*
+ * Return the PFN for a given cpu virtual address returned by
+ * __dma_alloc_coherent. This is used by dma_mmap_coherent()
+ */
+unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr)
+{
+       /* This should always be populated, so we don't test every
+        * level. If that fails, we'll have a nice crash which
+        * will be as good as a BUG_ON()
+        */
+       pgd_t *pgd = pgd_offset_k(cpu_addr);
+       pud_t *pud = pud_offset(pgd, cpu_addr);
+       pmd_t *pmd = pmd_offset(pud, cpu_addr);
+       pte_t *ptep = pte_offset_kernel(pmd, cpu_addr);
+
+       if (pte_none(*ptep) || !pte_present(*ptep))
+               return 0;
+       return pte_pfn(*ptep);
+}