/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-iommu.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <asm/cacheflush.h>

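/*
 * The atomic pool backs coherent allocations that cannot sleep (e.g.
 * GFP_ATOMIC) on non-coherent devices. It defaults to
 * DEFAULT_DMA_COHERENT_POOL_SIZE (256 KiB) and can be resized with the
 * "coherent_pool=" kernel parameter.
 */
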
static struct gen_pool *atomic_pool;

#define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

void *arch_alloc_from_atomic_pool(size_t size, struct page **ret_page, gfp_t flags)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
		memset(ptr, 0, size);
	}

	return ptr;
}

bool arch_in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

int arch_free_from_atomic_pool(void *start, size_t size)
{
	if (!arch_in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}

static void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  struct dma_attrs *attrs)
{
	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return NULL;
	}

	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		flags |= GFP_DMA;
	if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
		struct page *page;
		void *addr;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size));
		if (!page)
			return NULL;

		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		addr = page_address(page);
		memset(addr, 0, size);
		return addr;
	} else {
		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
	}
}

static void __dma_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle,
				struct dma_attrs *attrs)
{
	bool freed;
	phys_addr_t paddr = dma_to_phys(dev, dma_handle);

	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return;
	}

	freed = dma_release_from_contiguous(dev,
					    phys_to_page(paddr),
					    size >> PAGE_SHIFT);
	if (!freed)
		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}

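/*
 * __dma_alloc() is the .alloc hook of swiotlb_dma_ops. Coherent devices get
 * the linear-map address from __dma_alloc_coherent() directly; non-coherent
 * devices either take a non-blocking allocation from the atomic pool or get
 * a fresh non-cacheable remapping of the buffer (pgprot from
 * arch_get_dma_pgprot()), with the kernel alias flushed from the caches
 * first.
 */
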
static void *__dma_alloc(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flags,
			 struct dma_attrs *attrs)
{
	struct page *page;
	void *ptr, *coherent_ptr;
	bool coherent = is_device_dma_coherent(dev);
	pgprot_t prot = arch_get_dma_pgprot(attrs, PAGE_KERNEL, false);

	size = PAGE_ALIGN(size);

	if (!coherent && !gfpflags_allow_blocking(flags)) {
		struct page *page = NULL;
		void *addr = arch_alloc_from_atomic_pool(size, &page, flags);

		if (addr)
			*dma_handle = phys_to_dma(dev, page_to_phys(page));

		return addr;
	}

	ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
	if (!ptr)
		goto no_mem;

	/* no need for non-cacheable mapping if coherent */
	if (coherent)
		return ptr;

	/* remove any dirty cache lines on the kernel alias */
	__dma_flush_range(ptr, ptr + size);

	/* create a coherent mapping */
	page = virt_to_page(ptr);
	coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
						   prot, __builtin_return_address(0));
	if (!coherent_ptr)
		goto no_map;

	return coherent_ptr;

no_map:
	__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
	*dma_handle = DMA_ERROR_CODE;
	return NULL;
}

static void __dma_free(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle,
		       struct dma_attrs *attrs)
{
	void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

	size = PAGE_ALIGN(size);

	if (!is_device_dma_coherent(dev)) {
		if (arch_free_from_atomic_pool(vaddr, size))
			return;
		vunmap(vaddr);
	}
	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}

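/*
 * The __swiotlb_* callbacks below defer the actual mapping to the generic
 * swiotlb implementation and only add the CPU cache maintenance that
 * non-coherent devices need: __dma_map_area() when a buffer is handed to
 * the device, __dma_unmap_area() when it is handed back to the CPU.
 * Coherent devices skip the maintenance entirely.
 */
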
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     struct dma_attrs *attrs)
{
	dma_addr_t dev_addr;

	dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
	if (!is_device_dma_coherent(dev))
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

	return dev_addr;
}

static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
				 size_t size, enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	if (!is_device_dma_coherent(dev))
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}

static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				  int nelems, enum dma_data_direction dir,
				  struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i, ret;

	ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, ret, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);

	return ret;
}

static void __swiotlb_unmap_sg_attrs(struct device *dev,
				     struct scatterlist *sgl, int nelems,
				     enum dma_data_direction dir,
				     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}

static void __swiotlb_sync_single_for_cpu(struct device *dev,
					  dma_addr_t dev_addr, size_t size,
					  enum dma_data_direction dir)
{
	if (!is_device_dma_coherent(dev))
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}

static void __swiotlb_sync_single_for_device(struct device *dev,
					     dma_addr_t dev_addr, size_t size,
					     enum dma_data_direction dir)
{
	swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
	if (!is_device_dma_coherent(dev))
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}

static void __swiotlb_sync_sg_for_cpu(struct device *dev,
				      struct scatterlist *sgl, int nelems,
				      enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}

static void __swiotlb_sync_sg_for_device(struct device *dev,
					 struct scatterlist *sgl, int nelems,
					 enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);
}

static int __swiotlb_mmap(struct device *dev,
			  struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
			  struct dma_attrs *attrs)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
					PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	vma->vm_page_prot = arch_get_dma_pgprot(attrs, vma->vm_page_prot,
						is_device_dma_coherent(dev));

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}

static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t handle, size_t size,
				 struct dma_attrs *attrs)
{
	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

	if (!ret)
		sg_set_page(sgt->sgl, phys_to_page(dma_to_phys(dev, handle)),
			    PAGE_ALIGN(size), 0);

	return ret;
}

static struct dma_map_ops swiotlb_dma_ops = {
	.alloc = __dma_alloc,
	.free = __dma_free,
	.mmap = __swiotlb_mmap,
	.get_sgtable = __swiotlb_get_sgtable,
	.map_page = __swiotlb_map_page,
	.unmap_page = __swiotlb_unmap_page,
	.map_sg = __swiotlb_map_sg_attrs,
	.unmap_sg = __swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
	.sync_single_for_device = __swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
	.dma_supported = swiotlb_dma_supported,
	.mapping_error = swiotlb_dma_mapping_error,
};

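/*
 * atomic_pool_init() carves out atomic_pool_size bytes (from CMA if
 * available, otherwise from ZONE_DMA), flushes the kernel alias and remaps
 * the memory as Normal Non-Cacheable, then hands it to a gen_pool so that
 * arch_alloc_from_atomic_pool() can serve atomic allocations from it.
 */
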
static int __init atomic_pool_init(void)
{
	pgprot_t prot = __pgprot(PROT_NORMAL_NC);
	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
	struct page *page;
	void *addr;
	unsigned int pool_size_order = get_order(atomic_pool_size);

	if (dev_get_cma_area(NULL))
		page = dma_alloc_from_contiguous(NULL, nr_pages,
						 pool_size_order);
	else
		page = alloc_pages(GFP_DMA, pool_size_order);

	if (page) {
		int ret;
		void *page_addr = page_address(page);

		memset(page_addr, 0, atomic_pool_size);
		__dma_flush_range(page_addr, page_addr + atomic_pool_size);

		atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
		if (!atomic_pool)
			goto free_page;

		addr = dma_common_contiguous_remap(page, atomic_pool_size,
						   VM_USERMAP, prot, atomic_pool_init);
		if (!addr)
			goto destroy_genpool;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto remove_mapping;

		gen_pool_set_algo(atomic_pool,
				  gen_pool_first_fit_order_align,
				  (void *)PAGE_SHIFT);

		pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
			atomic_pool_size / 1024);
		return 0;
	}
	goto out;

remove_mapping:
	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
free_page:
	if (!dma_release_from_contiguous(NULL, page, nr_pages))
		__free_pages(page, pool_size_order);
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}

/********************************************
 * The following APIs are for dummy DMA ops *
 ********************************************/

static void *__dummy_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   struct dma_attrs *attrs)
{
	return NULL;
}

static void __dummy_free(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t dma_handle,
			 struct dma_attrs *attrs)
{
}

static int __dummy_mmap(struct device *dev,
			struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			struct dma_attrs *attrs)
{
	return -ENXIO;
}

static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	return DMA_ERROR_CODE;
}

static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       struct dma_attrs *attrs)
{
}

static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
			  int nelems, enum dma_data_direction dir,
			  struct dma_attrs *attrs)
{
	return 0;
}

static void __dummy_unmap_sg(struct device *dev,
			     struct scatterlist *sgl, int nelems,
			     enum dma_data_direction dir,
			     struct dma_attrs *attrs)
{
}

static void __dummy_sync_single(struct device *dev,
				dma_addr_t dev_addr, size_t size,
				enum dma_data_direction dir)
{
}

static void __dummy_sync_sg(struct device *dev,
			    struct scatterlist *sgl, int nelems,
			    enum dma_data_direction dir)
{
}

static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return 1;
}

static int __dummy_dma_supported(struct device *hwdev, u64 mask)
{
	return 0;
}

struct dma_map_ops dummy_dma_ops = {
	.alloc = __dummy_alloc,
	.free = __dummy_free,
	.mmap = __dummy_mmap,
	.map_page = __dummy_map_page,
	.unmap_page = __dummy_unmap_page,
	.map_sg = __dummy_map_sg,
	.unmap_sg = __dummy_unmap_sg,
	.sync_single_for_cpu = __dummy_sync_single,
	.sync_single_for_device = __dummy_sync_single,
	.sync_sg_for_cpu = __dummy_sync_sg,
	.sync_sg_for_device = __dummy_sync_sg,
	.mapping_error = __dummy_mapping_error,
	.dma_supported = __dummy_dma_supported,
};
EXPORT_SYMBOL(dummy_dma_ops);

static int __init arm64_dma_init(void)
{
	return atomic_pool_init();
}
arch_initcall(arm64_dma_init);

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);

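/*
 * arch_setup_dma_ops() records the device's coherency in archdata and falls
 * back to the swiotlb-based ops when common_iommu_setup_dma_ops() does not
 * take over the device with IOMMU-backed ops.
 */
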
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			struct iommu_ops *iommu, bool coherent)
{
	dev->archdata.dma_coherent = coherent;

	if (!common_iommu_setup_dma_ops(dev, dma_base, size, iommu))
		arch_set_dma_ops(dev, &swiotlb_dma_ops);
}

void arch_teardown_dma_ops(struct device *dev)
{
	common_iommu_teardown_dma_ops(dev);
}