Since arm64 does not support ISA, there is no need for early swiotlb
initialisation. This patch switches the DMA mapping code to
swiotlb_late_init_with_default_size(). A side effect of this is that
GFP_DMA is used for the swiotlb buffer and devices with a 32-bit
coherent mask are correctly supported.
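To illustrate the side effect (this sketch is not part of the upstream
change): with the bounce buffer now allocated with GFP_DMA, a driver that
limits its device to 32-bit DMA addresses can use the standard coherent
DMA API as below. The probe function, device and buffer size are made up
for illustration only.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/*
 * Hypothetical consumer: restrict the device to 32-bit DMA addresses
 * and perform a coherent allocation that must honour that mask.
 */
static int example_probe(struct device *dev)
{
	dma_addr_t dma_handle;
	void *cpu_addr;

	if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
	    dma_set_coherent_mask(dev, DMA_BIT_MASK(32)))
		return -EIO;

	cpu_addr = dma_alloc_coherent(dev, SZ_4K, &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... DMA to/from cpu_addr at bus address dma_handle ... */

	dma_free_coherent(dev, SZ_4K, cpu_addr, dma_handle);
	return 0;
}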
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
(cherry picked from commit 3690951fc6d42f3a0903987677d0e592c49dd8db)
Signed-off-by: Ryan Harkin <ryan.harkin@linaro.org>
Conflicts:
arch/arm64/mm/init.c
Signed-off-by: Mark Brown <broonie@linaro.org>
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
};
EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
-void __init arm64_swiotlb_init(void)
+extern int swiotlb_late_init_with_default_size(size_t default_size);
+
+static int __init swiotlb_late_init(void)
{
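+	/*
+	 * Request the 64MB default, but never more than the largest
+	 * contiguous allocation the page allocator can provide, since
+	 * the bounce buffer is now allocated after early boot.
+	 */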
+ size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
+
dma_ops = &coherent_swiotlb_dma_ops;
- swiotlb_init(1);
+
+ return swiotlb_late_init_with_default_size(swiotlb_size);
}
+subsys_initcall(swiotlb_late_init);
#define PREALLOC_DMA_DEBUG_ENTRIES 4096
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
unsigned long reserved_pages, free_pages;
struct memblock_region *reg;
- arm64_swiotlb_init();
-
max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
#ifndef CONFIG_SPARSEMEM_VMEMMAP