#ifdef CONFIG_MMU
#ifdef CONFIG_HUGETLB_PAGE
-#error ARM Coherent DMA allocator does not (yet) support huge TLB
+#warning ARM Coherent DMA allocator does not (yet) support huge TLB
#endif
static void *__alloc_from_contiguous(struct device *dev, size_t size,
pgprot_t prot, struct page **ret_page,
+#ifdef CONFIG_ARCH_ROCKCHIP
+ struct dma_attrs *attrs,
+#endif
const void *caller);
static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
const void *caller)
{
- struct vm_struct *area;
- unsigned long addr;
-
/*
 * DMA allocation can be mapped to user space, so let's
* set VM_USERMAP flags too.
*/
- area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
- caller);
- if (!area)
- return NULL;
- addr = (unsigned long)area->addr;
- area->phys_addr = __pfn_to_phys(page_to_pfn(page));
-
- if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
- vunmap((void *)addr);
- return NULL;
- }
- return (void *)addr;
+ return dma_common_contiguous_remap(page, size,
+ VM_ARM_DMA_CONSISTENT | VM_USERMAP,
+ prot, caller);
}
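/*
 * Side note, not part of the patch: a minimal sketch of the generic
 * helper the function above now calls, assuming the
 * drivers/base/dma-mapping.c implementation introduced alongside this
 * conversion.  It builds a page array covering the contiguous region
 * and defers to the page-array remapper:
 */
void *dma_common_contiguous_remap(struct page *page, size_t size,
			unsigned long vm_flags,
			pgprot_t prot, const void *caller)
{
	int i;
	struct page **pages;
	void *ptr;
	unsigned long pfn;

	pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* Fill in a struct page pointer for every page in the region. */
	for (i = 0, pfn = page_to_pfn(page); i < (size >> PAGE_SHIFT); i++)
		pages[i] = pfn_to_page(pfn + i);

	ptr = dma_common_pages_remap(pages, size, vm_flags, prot, caller);

	/* The page array was only needed to build the mapping. */
	kfree(pages);

	return ptr;
}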
static void __dma_free_remap(void *cpu_addr, size_t size)
{
- unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
- struct vm_struct *area = find_vm_area(cpu_addr);
- if (!area || (area->flags & flags) != flags) {
- WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
- return;
- }
- unmap_kernel_range((unsigned long)cpu_addr, size);
- vunmap(cpu_addr);
+ dma_common_free_remap(cpu_addr, size,
+ VM_ARM_DMA_CONSISTENT | VM_USERMAP);
}
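/*
 * Likewise for the free side; a sketch of dma_common_free_remap() under
 * the same assumptions.  It performs exactly the flag check, unmap and
 * vunmap that the open-coded version above used to do:
 */
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || (area->flags & vm_flags) != vm_flags) {
		WARN(1, "trying to free invalid coherent area: %p\n",
		     cpu_addr);
		return;
	}

	/* Tear down the page tables, then release the vmalloc area. */
	unmap_kernel_range((unsigned long)cpu_addr, size);
	vunmap(cpu_addr);
}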
#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
if (IS_ENABLED(CONFIG_DMA_CMA))
ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
+#ifdef CONFIG_ARCH_ROCKCHIP
+ NULL,
+#endif
atomic_pool_init);
else
ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page,
map.type = MT_MEMORY_DMA_READY;
/*
- * Clear previous low-memory mapping
+ * Clear previous low-memory mapping to ensure that the
+ * TLB does not see any conflicting entries, then flush
+ * the TLB of the old entries before creating new mappings.
+ *
+ * This ensures that any speculatively loaded TLB entries
+ * (even though they may be rare) can not cause any problems,
+ * and ensures that this code is architecturally compliant.
*/
for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
+ flush_tlb_kernel_range(__phys_to_virt(start),
+ __phys_to_virt(end));
+
iotable_init(&map, 1);
}
}
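/*
 * Illustrative summary, not part of the patch: the hunk above follows
 * the classic break-before-make sequence the comment describes:
 *
 *	pmd_clear(...);			break: remove the old entries
 *	flush_tlb_kernel_range(...);	flush: kill stale/speculated TLB
 *	iotable_init(&map, 1);		make:  install the new mapping
 */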
static void *__alloc_from_contiguous(struct device *dev, size_t size,
pgprot_t prot, struct page **ret_page,
+#ifdef CONFIG_ARCH_ROCKCHIP
+ struct dma_attrs *attrs,
+#endif
const void *caller)
{
unsigned long order = get_order(size);
__dma_clear_buffer(page, size);
+#ifdef CONFIG_ARCH_ROCKCHIP
+	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
+		*ret_page = page;
+		return page;
+	}
+#endif
+
if (PageHighMem(page)) {
ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
if (!ptr) {
return ptr;
}
+#ifdef CONFIG_ARCH_ROCKCHIP
+static void __free_from_contiguous(struct device *dev, struct page *page,
+ void *cpu_addr, size_t size,
+ struct dma_attrs *attrs)
+{
+ if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
+ if (PageHighMem(page))
+ __dma_free_remap(cpu_addr, size);
+ else
+ __dma_remap(page, size, pgprot_kernel);
+ }
+ dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+}
+#else
static void __free_from_contiguous(struct device *dev, struct page *page,
void *cpu_addr, size_t size)
{
__dma_remap(page, size, pgprot_kernel);
dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}
+#endif
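/*
 * Hypothetical driver-side use of the attribute handled above.  The
 * helper name and device are made up; DEFINE_DMA_ATTRS(), dma_set_attr()
 * and dma_alloc_attrs() are the standard struct dma_attrs-era APIs:
 */
static void *alloc_without_kernel_mapping(struct device *dev, size_t size,
					  dma_addr_t *dma)
{
	DEFINE_DMA_ATTRS(attrs);

	/* Ask for a buffer that gets no kernel virtual mapping at all. */
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	/*
	 * Note: with the Rockchip path above, the returned cookie is the
	 * struct page pointer, not a mapped kernel address; it must only
	 * be handed back to dma_free_attrs(), never dereferenced.
	 */
	return dma_alloc_attrs(dev, size, dma, GFP_KERNEL, &attrs);
}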
static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
{
#define __get_dma_pgprot(attrs, prot) __pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c) NULL
#define __alloc_from_pool(size, ret_page) NULL
+#ifdef CONFIG_ARCH_ROCKCHIP
+#define __alloc_from_contiguous(dev, size, prot, ret, attrs, c) NULL
+#else
#define __alloc_from_contiguous(dev, size, prot, ret, c) NULL
+#endif
#define __free_from_pool(cpu_addr, size) 0
+#ifdef CONFIG_ARCH_ROCKCHIP
+#define __free_from_contiguous(dev, page, cpu_addr, size, attrs) do { } while (0)
+#else
#define __free_from_contiguous(dev, page, cpu_addr, size) do { } while (0)
+#endif
#define __dma_free_remap(cpu_addr, size) do { } while (0)
#endif /* CONFIG_MMU */
static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+#ifdef CONFIG_ARCH_ROCKCHIP
+ gfp_t gfp, pgprot_t prot, bool is_coherent,
+ struct dma_attrs *attrs, const void *caller)
+#else
gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller)
+#endif
{
u64 mask = get_coherent_dma_mask(dev);
struct page *page = NULL;
else if (!IS_ENABLED(CONFIG_DMA_CMA))
addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
else
+#ifdef CONFIG_ARCH_ROCKCHIP
+ addr = __alloc_from_contiguous(dev, size, prot, &page, attrs, caller);
+#else
addr = __alloc_from_contiguous(dev, size, prot, &page, caller);
+#endif
if (addr)
*handle = pfn_to_dma(dev, page_to_pfn(page));
return memory;
return __dma_alloc(dev, size, handle, gfp, prot, false,
+#ifdef CONFIG_ARCH_ROCKCHIP
+ attrs,
+#endif
__builtin_return_address(0));
}
return memory;
return __dma_alloc(dev, size, handle, gfp, prot, true,
+#ifdef CONFIG_ARCH_ROCKCHIP
+ attrs,
+#endif
__builtin_return_address(0));
}
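/*
 * For context, reconstructed from mainline of this era (a hedged sketch,
 * not quoted from this patch): the two tails above belong to
 * arm_dma_alloc() and arm_coherent_dma_alloc(), which differ only in the
 * is_coherent flag they pass down:
 */
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		    gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	/* false here selects the non-coherent (cache-maintained) path. */
	return __dma_alloc(dev, size, handle, gfp, prot, false,
#ifdef CONFIG_ARCH_ROCKCHIP
			   attrs,
#endif
			   __builtin_return_address(0));
}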
* Non-atomic allocations cannot be freed with IRQs disabled
*/
WARN_ON(irqs_disabled());
+#ifdef CONFIG_ARCH_ROCKCHIP
+ __free_from_contiguous(dev, page, cpu_addr, size, attrs);
+#else
__free_from_contiguous(dev, page, cpu_addr, size);
+#endif
}
}
__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
const void *caller)
{
- unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
- struct vm_struct *area;
- unsigned long p;
-
- area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
- caller);
- if (!area)
- return NULL;
-
- area->pages = pages;
- area->nr_pages = nr_pages;
- p = (unsigned long)area->addr;
-
- for (i = 0; i < nr_pages; i++) {
- phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i]));
- if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot))
- goto err;
- p += PAGE_SIZE;
- }
- return area->addr;
-err:
- unmap_kernel_range((unsigned long)area->addr, size);
- vunmap(area->addr);
+ return dma_common_pages_remap(pages, size,
+ VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller);
- return NULL;
}
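/*
 * Finally, a sketch of the page-array helper the IOMMU path now uses,
 * under the same assumptions as the earlier sketches (map_vm_area() of
 * this era takes a struct page triple pointer):
 */
void *dma_common_pages_remap(struct page **pages, size_t size,
			unsigned long vm_flags, pgprot_t prot,
			const void *caller)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, vm_flags, caller);
	if (!area)
		return NULL;

	area->pages = pages;
	area->nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;

	/* Wire each page into the freshly reserved vmalloc area. */
	if (map_vm_area(area, prot, &pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area->addr;
}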
}
if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
- unmap_kernel_range((unsigned long)cpu_addr, size);
- vunmap(cpu_addr);
+ dma_common_free_remap(cpu_addr, size,
+ VM_ARM_DMA_CONSISTENT | VM_USERMAP);
}
__iommu_remove_mapping(dev, handle, size);