/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/dma-iommu.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/cma.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <asm/dma-contiguous.h>
/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}
static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}
/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
				      handle & ~PAGE_MASK, size, dir);
}
static void arm_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_dev_to_cpu(page, offset, size, dir);
}
static void arm_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_cpu_to_dev(page, offset, size, dir);
}
struct dma_map_ops arm_dma_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_dma_map_page,
	.unmap_page		= arm_dma_unmap_page,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
	.sync_single_for_device	= arm_dma_sync_single_for_device,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.set_dma_mask		= arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_dma_ops);
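
/*
 * Illustrative sketch (not part of this file; names are hypothetical): how
 * a driver typically reaches the streaming ops above.  On a non-coherent
 * ARM system, dma_map_page()/dma_unmap_page() dispatch to
 * arm_dma_map_page()/arm_dma_unmap_page() through arm_dma_ops.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... let the device DMA into the page ...
 *	dma_unmap_page(dev, handle, PAGE_SIZE, DMA_FROM_DEVICE);
 *	... the CPU may now safely read what the device wrote ...
 */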
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, struct dma_attrs *attrs);
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 struct dma_attrs *attrs);
struct dma_map_ops arm_coherent_dma_ops = {
	.alloc			= arm_coherent_dma_alloc,
	.free			= arm_coherent_dma_free,
	.mmap			= arm_coherent_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_coherent_dma_map_page,
	.map_sg			= arm_dma_map_sg,
	.set_dma_mask		= arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);
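
/*
 * arm_coherent_dma_ops is selected by arch_setup_dma_ops() below for devices
 * that are hardware-coherent with the CPU caches: map_page and map_sg do no
 * cache maintenance, and the unmap/sync hooks are simply left NULL, since
 * the generic dma-mapping wrappers skip hooks that are not provided.
 */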
static int __dma_supported(struct device *dev, u64 mask, bool warn)
{
	unsigned long max_dma_pfn;

	/*
	 * If the mask allows for more memory than we can address,
	 * and we actually have that much memory, then we must
	 * indicate that DMA to this device is not supported.
	 */
	if (sizeof(mask) != sizeof(dma_addr_t) &&
	    mask > (dma_addr_t)~0 &&
	    dma_to_pfn(dev, ~0) < max_pfn - 1) {
		if (warn) {
			dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
				 mask);
			dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
		}
		return 0;
	}

	max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);

	/*
	 * Translate the device's DMA mask to a PFN limit.  This
	 * PFN number includes the page which we can DMA to.
	 */
	if (dma_to_pfn(dev, mask) < max_dma_pfn) {
		if (warn)
			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
				 mask,
				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
				 max_dma_pfn + 1);
		return 0;
	}

	return 1;
}
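
/*
 * Worked example for __dma_supported(), assuming 4 KiB pages and no DMA
 * offset: a device limited to 24-bit addressing (mask 0x00ffffff) gives
 * dma_to_pfn(dev, mask) == 0xfff.  On a machine whose DMA zone extends to,
 * say, max_dma_pfn == 0x2ffff, the mask covers only the first 16 MiB of
 * memory, so 0xfff < 0x2ffff and __dma_supported() returns 0.
 */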
static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = (u64)DMA_BIT_MASK(32);

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if (!__dma_supported(dev, mask, true))
			return 0;
	}

	return mask;
}
static void __dma_clear_buffer(struct page *page, size_t size)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	if (PageHighMem(page)) {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			memset(ptr, 0, PAGE_SIZE);
			dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		outer_flush_range(base, end);
	} else {
		void *ptr = page_address(page);
		memset(ptr, 0, size);
		dmac_flush_range(ptr, ptr + size);
		outer_flush_range(__pa(ptr), __pa(ptr) + size);
	}
}
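
/*
 * Note on __dma_clear_buffer(): the memset() alone is not enough.  The
 * zeroed lines must also be pushed out of the L1 and outer caches with
 * dmac_flush_range()/outer_flush_range(), otherwise a later non-cacheable
 * mapping of the same pages could read stale, pre-zeroing contents from
 * RAM while the zeroes sit only in the cache.
 */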
/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size);

	return page;
}
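
/*
 * Example of the split_page() trick above: a 12 KiB (3 page) request has
 * get_order(size) == 2, so alloc_pages() hands back a 4 page block.
 * split_page() turns it into four independent order-0 pages, and the loop
 * frees the fourth, leaving exactly the 3 contiguous pages asked for.
 */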
/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}

#ifdef CONFIG_MMU
static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
				 const void *caller, bool want_vaddr);
static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
	const void *caller)
{
	/*
	 * DMA allocation can be mapped to user space, so lets
	 * set VM_USERMAP flags too.
	 */
	return dma_common_contiguous_remap(page, size,
			VM_ARM_DMA_CONSISTENT | VM_USERMAP,
			prot, caller);
}
static void __dma_free_remap(void *cpu_addr, size_t size)
{
	dma_common_free_remap(cpu_addr, size,
			VM_ARM_DMA_CONSISTENT | VM_USERMAP);
}
#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
static struct gen_pool *atomic_pool;

static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);
void __init init_dma_coherent_pool_size(unsigned long size)
{
	/*
	 * Catch any attempt to set the pool size too late.
	 */
	BUG_ON(atomic_pool);

	/*
	 * Set architecture specific coherent pool size only if
	 * it has not been changed by kernel command line parameter.
	 */
	if (atomic_pool_size == DEFAULT_DMA_COHERENT_POOL_SIZE)
		atomic_pool_size = size;
}
/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init atomic_pool_init(void)
{
	pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
	gfp_t gfp = GFP_KERNEL | GFP_DMA;
	struct page *page;
	void *ptr;

	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!atomic_pool)
		goto out;

	if (dev_get_cma_area(NULL))
		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
					      &page, atomic_pool_init, true);
	else
		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
					   &page, atomic_pool_init, true);
	if (ptr) {
		int ret;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto destroy_genpool;

		gen_pool_set_algo(atomic_pool,
				gen_pool_first_fit_order_align,
				(void *)PAGE_SHIFT);
		pr_info("DMA: preallocated %zd KiB pool for atomic coherent allocations\n",
			atomic_pool_size / 1024);
		return 0;
	}

destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}
/*
 * CMA is activated by core_initcall, so we must be called after it.
 */
postcore_initcall(atomic_pool_init);
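
/*
 * The atomic pool above backs dma_alloc_coherent() callers that cannot
 * sleep: __dma_alloc() falls back to __alloc_from_pool() whenever the gfp
 * flags do not allow blocking (e.g. GFP_ATOMIC from interrupt context),
 * because remapping page tables or migrating CMA pages may sleep.
 */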
struct dma_contig_early_reserve {
	phys_addr_t base;
	unsigned long size;
};

static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;

static int dma_mmu_remap_num __initdata;
void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
	dma_mmu_remap[dma_mmu_remap_num].base = base;
	dma_mmu_remap[dma_mmu_remap_num].size = size;
	dma_mmu_remap_num++;
}
void __init dma_contiguous_remap(void)
{
	int i;

	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;
		struct map_desc map;
		unsigned long addr;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			continue;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_DMA_READY;

		/*
		 * Clear previous low-memory mapping to ensure that the
		 * TLB does not see any conflicting entries, then flush
		 * the TLB of the old entries before creating new mappings.
		 *
		 * This ensures that any speculatively loaded TLB entries
		 * (even though they may be rare) can not cause any problems,
		 * and ensures that this code is architecturally compliant.
		 */
		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
		     addr += PMD_SIZE)
			pmd_clear(pmd_off_k(addr));

		flush_tlb_kernel_range(__phys_to_virt(start),
				       __phys_to_virt(end));

		iotable_init(&map, 1);
	}
}
static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
			    void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}
static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned long end = start + size;

	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
	flush_tlb_kernel_range(start, end);
}
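
/*
 * __dma_remap() rewrites the kernel linear-map PTEs of an existing lowmem
 * buffer in place (e.g. to a non-cacheable pgprot) via __dma_update_pte(),
 * rather than creating a second mapping.  The TLB flush that follows is
 * required so no CPU keeps using the old cacheable attributes.
 */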
static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
				 const void *caller, bool want_vaddr)
{
	struct page *page;
	void *ptr = NULL;

	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;
	if (!want_vaddr)
		goto out;

	ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
	if (!ptr) {
		__dma_free_buffer(page, size);
		return NULL;
	}

 out:
	*ret_page = page;
	return ptr;
}
void *arch_alloc_from_atomic_pool(size_t size, struct page **ret_page, gfp_t gfp)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
	}

	return ptr;
}
bool arch_in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}
int arch_free_from_atomic_pool(void *start, size_t size)
{
	if (!arch_in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}
static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller, bool want_vaddr)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	struct page *page;
	void *ptr = NULL;

	page = dma_alloc_from_contiguous(dev, count, order);
	if (!page)
		return NULL;

	__dma_clear_buffer(page, size);

	if (!want_vaddr)
		goto out;

	if (PageHighMem(page)) {
		ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot, caller);
		if (!ptr) {
			dma_release_from_contiguous(dev, page, count);
			return NULL;
		}
	} else {
		__dma_remap(page, size, prot);
		ptr = page_address(page);
	}

 out:
	*ret_page = page;
	return ptr;
}
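
/*
 * The kernel mapping of CMA pages is handled two ways above: highmem pages
 * have no permanent kernel mapping, so a fresh one is created with
 * __dma_alloc_remap(); lowmem pages already live in the linear map, which
 * cannot be unmapped, so its attributes are rewritten with __dma_remap().
 */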
static void __free_from_contiguous(struct device *dev, struct page *page,
				   void *cpu_addr, size_t size, bool want_vaddr)
{
	if (want_vaddr) {
		if (PageHighMem(page))
			__dma_free_remap(cpu_addr, size);
		else
			__dma_remap(page, size, PAGE_KERNEL);
	}
	dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}
#define __alloc_from_pool(size, ret, gfp)	arch_alloc_from_atomic_pool(size, ret, gfp)
#define __free_from_pool(addr, size)		arch_free_from_atomic_pool(addr, size)
#define __get_dma_pgprot(attrs, prot, coherent)	arch_get_dma_pgprot(attrs, prot, coherent)
#else	/* !CONFIG_MMU */

#define __get_dma_pgprot(attrs, prot, coherent)			__pgprot(0)
#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c, wv)	NULL
#define __alloc_from_pool(size, ret_page, gfp)			NULL
#define __alloc_from_contiguous(dev, size, prot, ret, c, wv)	NULL
#define __free_from_pool(cpu_addr, size)			0
#define __free_from_contiguous(dev, page, cpu_addr, size, wv)	do { } while (0)
#define __dma_free_remap(cpu_addr, size)			do { } while (0)

#endif	/* CONFIG_MMU */
static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
	struct page *page;

	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	*ret_page = page;
	return page_address(page);
}
static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent,
			 struct dma_attrs *attrs, const void *caller)
{
	u64 mask = get_coherent_dma_mask(dev);
	struct page *page = NULL;
	void *addr;
	bool want_vaddr;

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);

	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);
	want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);

	if (nommu())
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else if (dev_get_cma_area(dev) && (gfp & __GFP_DIRECT_RECLAIM))
		addr = __alloc_from_contiguous(dev, size, prot, &page,
					       caller, want_vaddr);
	else if (is_coherent)
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else if (!gfpflags_allow_blocking(gfp))
		addr = __alloc_from_pool(size, &page, gfp);
	else
		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page,
					    caller, want_vaddr);

	if (page)
		*handle = pfn_to_dma(dev, page_to_pfn(page));

	return want_vaddr ? addr : page;
}
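
/*
 * Allocation strategy of __dma_alloc(), in order: the simple whole-page
 * allocator for !MMU systems; CMA when the device has a contiguous region
 * and the caller may block; the simple allocator again for coherent
 * devices; the atomic pool for non-blocking callers; otherwise a buddy
 * allocation remapped with non-cacheable attributes.
 */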
/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		    gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);

	return __dma_alloc(dev, size, handle, gfp, prot, false,
			   attrs, __builtin_return_address(0));
}
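
/*
 * Illustrative sketch (hypothetical driver code): the usual entry point
 * into arm_dma_alloc() is the generic DMA API, not a direct call.
 *
 *	dma_addr_t bus;
 *	void *cpu;
 *
 *	cpu = dma_alloc_coherent(dev, SZ_4K, &bus, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	... program the device with "bus", access the buffer via "cpu" ...
 *	dma_free_coherent(dev, SZ_4K, cpu, bus);
 */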
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
	return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
			   attrs, __builtin_return_address(0));
}
static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
			  struct dma_attrs *attrs)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_pfn(dev, dma_addr);
	unsigned long off = vma->vm_pgoff;

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}
#endif	/* CONFIG_MMU */

	return ret;
}
/*
 * Create userspace mapping for the DMA-coherent memory.
 */
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 struct dma_attrs *attrs)
{
	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 struct dma_attrs *attrs)
{
#ifdef CONFIG_MMU
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, false);
#endif	/* CONFIG_MMU */
	return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
/*
 * Free a buffer as defined by the above mapping.
 */
static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			   dma_addr_t handle, struct dma_attrs *attrs,
			   bool is_coherent)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	bool want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);

	size = PAGE_ALIGN(size);

	if (nommu()) {
		__dma_free_buffer(page, size);
	} else if (!is_coherent && __free_from_pool(cpu_addr, size)) {
		return;
	} else if (!dev_get_cma_area(dev)) {
		if (want_vaddr && !is_coherent)
			__dma_free_remap(cpu_addr, size);
		__dma_free_buffer(page, size);
	} else {
		/*
		 * Non-atomic allocations cannot be freed with IRQs disabled
		 */
		WARN_ON(irqs_disabled());
		__free_from_contiguous(dev, page, cpu_addr, size, want_vaddr);
	}
}
void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle, struct dma_attrs *attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
}
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, struct dma_attrs *attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}
int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t handle, size_t size,
			struct dma_attrs *attrs)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}
static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;

	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	do {
		size_t len = left;
		void *vaddr;

		page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			if (cache_is_vipt_nonaliasing()) {
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			} else {
				vaddr = kmap_high_get(page);
				if (vaddr) {
					op(vaddr + offset, len, dir);
					kunmap_high(page);
				}
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}
/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}
void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* in any case, don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		outer_inv_range(paddr, paddr + size);

		dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
	}

	/*
	 * Mark the D-cache clean for these pages to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
		unsigned long pfn;
		size_t left = size;

		pfn = page_to_pfn(page) + off / PAGE_SIZE;
		off %= PAGE_SIZE;
		if (off) {
			pfn++;
			left -= PAGE_SIZE - off;
		}
		while (left >= PAGE_SIZE) {
			page = pfn_to_page(pfn++);
			set_bit(PG_dcache_clean, &page->flags);
			left -= PAGE_SIZE;
		}
	}
}
/**
 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
						s->length, dir, attrs);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
	return 0;
}
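
/*
 * Illustrative sketch (hypothetical names): mapping a scatterlist through
 * the generic API, which lands in arm_dma_map_sg() via arm_dma_ops.
 *
 *	struct scatterlist sg[2];
 *	int count;
 *
 *	sg_init_table(sg, 2);
 *	sg_set_page(&sg[0], page0, PAGE_SIZE, 0);
 *	sg_set_page(&sg[1], page1, PAGE_SIZE, 0);
 *	count = dma_map_sg(dev, sg, 2, DMA_TO_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	... hand sg_dma_address()/sg_dma_len() of each entry to the device ...
 *	dma_unmap_sg(dev, sg, 2, DMA_TO_DEVICE);
 */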
/**
 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
}
/**
 * arm_dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
					 dir);
}
/**
 * arm_dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
					    dir);
}
/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
int dma_supported(struct device *dev, u64 mask)
{
	return __dma_supported(dev, mask, false);
}
EXPORT_SYMBOL(dma_supported);
int arm_dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
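
/*
 * Illustrative sketch: a driver negotiates its mask through the generic
 * helper, which reaches arm_dma_set_mask() via .set_dma_mask above.
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		dev_warn(dev, "no suitable DMA available\n");
 */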
#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);
static struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
{
	return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
}
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			struct iommu_ops *iommu, bool coherent)
{
	dev->archdata.dma_coherent = coherent;

	if (!common_iommu_setup_dma_ops(dev, dma_base, size, iommu))
		arch_set_dma_ops(dev, arm_get_dma_map_ops(coherent));
}
void arch_teardown_dma_ops(struct device *dev)
{
	common_iommu_teardown_dma_ops(dev);
}