Merge tag 'lsk-v3.10-android-15.02'
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 051e904a537906361d84776d50994dcb7fa6a243..1e1842cff808a0e720aa083f05636ba2056ee6f7 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -250,11 +250,14 @@ static void __dma_free_buffer(struct page *page, size_t size)
 
 #ifdef CONFIG_MMU
 #ifdef CONFIG_HUGETLB_PAGE
-#error ARM Coherent DMA allocator does not (yet) support huge TLB
+#warning ARM Coherent DMA allocator does not (yet) support huge TLB
 #endif
 
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
                                     pgprot_t prot, struct page **ret_page,
+#ifdef CONFIG_ARCH_ROCKCHIP
+                                    struct dma_attrs *attrs,
+#endif
                                     const void *caller);
 
 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
@@ -265,37 +268,19 @@ static void *
 __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
        const void *caller)
 {
-       struct vm_struct *area;
-       unsigned long addr;
-
        /*
         * DMA allocation can be mapped to user space, so let's
         * set VM_USERMAP flags too.
         */
-       area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
-                                 caller);
-       if (!area)
-               return NULL;
-       addr = (unsigned long)area->addr;
-       area->phys_addr = __pfn_to_phys(page_to_pfn(page));
-
-       if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
-               vunmap((void *)addr);
-               return NULL;
-       }
-       return (void *)addr;
+       return dma_common_contiguous_remap(page, size,
+                       VM_ARM_DMA_CONSISTENT | VM_USERMAP,
+                       prot, caller);
 }
 
 static void __dma_free_remap(void *cpu_addr, size_t size)
 {
-       unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
-       struct vm_struct *area = find_vm_area(cpu_addr);
-       if (!area || (area->flags & flags) != flags) {
-               WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
-               return;
-       }
-       unmap_kernel_range((unsigned long)cpu_addr, size);
-       vunmap(cpu_addr);
+       dma_common_free_remap(cpu_addr, size,
+                       VM_ARM_DMA_CONSISTENT | VM_USERMAP);
 }
 
 #define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
@@ -360,6 +345,9 @@ static int __init atomic_pool_init(void)
 
        if (IS_ENABLED(CONFIG_DMA_CMA))
                ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
+#ifdef CONFIG_ARCH_ROCKCHIP
+                                             NULL,
+#endif
                                              atomic_pool_init);
        else
                ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page,
@@ -429,12 +417,21 @@ void __init dma_contiguous_remap(void)
                map.type = MT_MEMORY_DMA_READY;
 
                /*
-                * Clear previous low-memory mapping
+                * Clear previous low-memory mapping to ensure that the
+                * TLB does not see any conflicting entries, then flush
+                * the TLB of the old entries before creating new mappings.
+                *
+                * This ensures that any speculatively loaded TLB entries
+                * (even though they may be rare) can not cause any problems,
+                * and ensures that this code is architecturally compliant.
                 */
                for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
                     addr += PMD_SIZE)
                        pmd_clear(pmd_off_k(addr));
 
+               flush_tlb_kernel_range(__phys_to_virt(start),
+                                      __phys_to_virt(end));
+
                iotable_init(&map, 1);
        }
 }
@@ -557,6 +554,9 @@ static int __free_from_pool(void *start, size_t size)
 
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
                                     pgprot_t prot, struct page **ret_page,
+#ifdef CONFIG_ARCH_ROCKCHIP
+                                    struct dma_attrs *attrs,
+#endif
                                     const void *caller)
 {
        unsigned long order = get_order(size);
@@ -570,6 +570,11 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
 
        __dma_clear_buffer(page, size);
 
+#ifdef CONFIG_ARCH_ROCKCHIP
+       if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+               return (*ret_page = page);
+#endif
+
        if (PageHighMem(page)) {
                ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
                if (!ptr) {
@@ -584,6 +589,20 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
        return ptr;
 }
 
+#ifdef CONFIG_ARCH_ROCKCHIP
+static void __free_from_contiguous(struct device *dev, struct page *page,
+                                  void *cpu_addr, size_t size,
+                                  struct dma_attrs *attrs)
+{
+       if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
+               if (PageHighMem(page))
+                       __dma_free_remap(cpu_addr, size);
+               else
+                       __dma_remap(page, size, pgprot_kernel);
+       }
+       dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+}
+#else
 static void __free_from_contiguous(struct device *dev, struct page *page,
                                   void *cpu_addr, size_t size)
 {
@@ -593,6 +612,7 @@ static void __free_from_contiguous(struct device *dev, struct page *page,
                __dma_remap(page, size, pgprot_kernel);
        dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
 }
+#endif
 
 static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
 {
@@ -611,9 +631,17 @@ static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
 #define __get_dma_pgprot(attrs, prot)  __pgprot(0)
 #define __alloc_remap_buffer(dev, size, gfp, prot, ret, c)     NULL
 #define __alloc_from_pool(size, ret_page)                      NULL
+#ifdef CONFIG_ARCH_ROCKCHIP
+#define __alloc_from_contiguous(dev, size, prot, ret, attrs, c)        NULL
+#else
 #define __alloc_from_contiguous(dev, size, prot, ret, c)       NULL
+#endif
 #define __free_from_pool(cpu_addr, size)                       0
+#ifdef CONFIG_ARCH_ROCKCHIP
+#define __free_from_contiguous(dev, page, cpu_addr, size, attrs) do { } while (0)
+#else
 #define __free_from_contiguous(dev, page, cpu_addr, size)      do { } while (0)
+#endif
 #define __dma_free_remap(cpu_addr, size)                       do { } while (0)
 
 #endif /* CONFIG_MMU */
@@ -633,7 +661,12 @@ static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
 
 
 static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+#ifdef CONFIG_ARCH_ROCKCHIP
+                        gfp_t gfp, pgprot_t prot, bool is_coherent,
+                        struct dma_attrs *attrs, const void *caller)
+#else
                         gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller)
+#endif
 {
        u64 mask = get_coherent_dma_mask(dev);
        struct page *page = NULL;
@@ -673,7 +706,11 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
        else if (!IS_ENABLED(CONFIG_DMA_CMA))
                addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
        else
+#ifdef CONFIG_ARCH_ROCKCHIP
+               addr = __alloc_from_contiguous(dev, size, prot, &page, attrs, caller);
+#else
                addr = __alloc_from_contiguous(dev, size, prot, &page, caller);
+#endif
 
        if (addr)
                *handle = pfn_to_dma(dev, page_to_pfn(page));
@@ -695,6 +732,9 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
                return memory;
 
        return __dma_alloc(dev, size, handle, gfp, prot, false,
+#ifdef CONFIG_ARCH_ROCKCHIP
+                          attrs,
+#endif
                           __builtin_return_address(0));
 }
 
@@ -708,6 +748,9 @@ static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
                return memory;
 
        return __dma_alloc(dev, size, handle, gfp, prot, true,
+#ifdef CONFIG_ARCH_ROCKCHIP
+                          attrs,
+#endif
                           __builtin_return_address(0));
 }
 
@@ -767,7 +810,11 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
                 * Non-atomic allocations cannot be freed with IRQs disabled
                 */
                WARN_ON(irqs_disabled());
+#ifdef CONFIG_ARCH_ROCKCHIP
+               __free_from_contiguous(dev, page, cpu_addr, size, attrs);
+#else
                __free_from_contiguous(dev, page, cpu_addr, size);
+#endif
        }
 }
 
@@ -1167,29 +1214,8 @@ static void *
 __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
                    const void *caller)
 {
-       unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
-       struct vm_struct *area;
-       unsigned long p;
-
-       area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
-                                 caller);
-       if (!area)
-               return NULL;
-
-       area->pages = pages;
-       area->nr_pages = nr_pages;
-       p = (unsigned long)area->addr;
-
-       for (i = 0; i < nr_pages; i++) {
-               phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i]));
-               if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot))
-                       goto err;
-               p += PAGE_SIZE;
-       }
-       return area->addr;
-err:
-       unmap_kernel_range((unsigned long)area->addr, size);
-       vunmap(area->addr);
+       return dma_common_pages_remap(pages, size,
+                       VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller);
        return NULL;
 }
 
@@ -1386,8 +1412,8 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
        }
 
        if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
-               unmap_kernel_range((unsigned long)cpu_addr, size);
-               vunmap(cpu_addr);
+               dma_common_free_remap(cpu_addr, size,
+                       VM_ARM_DMA_CONSISTENT | VM_USERMAP);
        }
 
        __iommu_remove_mapping(dev, handle, size);
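
The hunks at old lines 265 and 1167 drop the open-coded get_vm_area_caller()/ioremap_page_range() sequences in favour of the generic dma_common_contiguous_remap(), dma_common_pages_remap() and dma_common_free_remap() helpers (introduced upstream in drivers/base/dma-mapping.c and backported into this tree). The sketch below is not the helpers' actual implementation; it merely repackages the open-coded logic that the hunks remove, to show the behaviour the contiguous variant is expected to provide for a single physically contiguous buffer, plus the matching free side.

#include <linux/bug.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/memory.h>

/*
 * Illustrative sketch only -- not this tree's dma_common_contiguous_remap().
 * It mirrors the open-coded mapping removed above: reserve a vmalloc-space
 * area carrying the caller's vm_flags, record the physical base (visible in
 * /proc/vmallocinfo), then map the whole contiguous range with 'prot'.
 */
static void *contiguous_remap_sketch(struct page *page, size_t size,
				     unsigned long vm_flags, pgprot_t prot,
				     const void *caller)
{
	struct vm_struct *area;
	unsigned long addr;

	area = get_vm_area_caller(size, vm_flags, caller);
	if (!area)
		return NULL;

	addr = (unsigned long)area->addr;
	area->phys_addr = __pfn_to_phys(page_to_pfn(page));

	/* One physically contiguous range, so a single mapping call suffices. */
	if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
		vunmap((void *)addr);
		return NULL;
	}
	return (void *)addr;
}

/*
 * Matching free-side sketch, mirroring the removed __dma_free_remap() body:
 * refuse to touch mappings that do not carry the expected flags, then tear
 * down the page tables and release the vmalloc area.
 */
static void contiguous_unmap_sketch(void *cpu_addr, size_t size,
				    unsigned long vm_flags)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || (area->flags & vm_flags) != vm_flags) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}
	unmap_kernel_range((unsigned long)cpu_addr, size);
	vunmap(cpu_addr);
}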
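The CONFIG_ARCH_ROCKCHIP hunks thread the struct dma_attrs pointer from arm_dma_alloc() and __arm_dma_free() down into __alloc_from_contiguous() and __free_from_contiguous(), so that DMA_ATTR_NO_KERNEL_MAPPING can skip creating (and later tearing down) a kernel mapping for CMA buffers the CPU never touches; the allocator then hands back the struct page pointer as an opaque cookie. Below is a hypothetical driver-side sketch of how such an allocation is requested with the 3.10-era attrs API; the function name and surrounding details are invented for illustration, and only DEFINE_DMA_ATTRS(), dma_set_attr(), dma_alloc_attrs()/dma_free_attrs() and DMA_ATTR_NO_KERNEL_MAPPING are the mainline interfaces.

#include <linux/device.h>
#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/*
 * Hypothetical driver-side usage (not part of this patch): allocate a large
 * buffer that only the device ever accesses.  With DMA_ATTR_NO_KERNEL_MAPPING
 * the allocator may skip the kernel mapping entirely, which is the case the
 * Rockchip-specific paths above handle in __alloc_from_contiguous().
 */
static int example_alloc_device_only_buffer(struct device *dev, size_t size,
					    dma_addr_t *dma_handle)
{
	void *cookie;
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	/*
	 * The return value is an opaque cookie (on ARM/CMA effectively the
	 * struct page pointer), not a CPU-usable virtual address.
	 */
	cookie = dma_alloc_attrs(dev, size, dma_handle, GFP_KERNEL, &attrs);
	if (!cookie)
		return -ENOMEM;

	/* ... program *dma_handle into the device; never dereference 'cookie' ... */

	dma_free_attrs(dev, size, cookie, *dma_handle, &attrs);
	return 0;
}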