2 * SWIOTLB-based DMA API implementation
4 * Copyright (C) 2012 ARM Ltd.
5 * Author: Catalin Marinas <catalin.marinas@arm.com>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 #include <linux/gfp.h>
21 #include <linux/export.h>
22 #include <linux/slab.h>
23 #include <linux/dma-mapping.h>
24 #include <linux/vmalloc.h>
25 #include <linux/swiotlb.h>
27 #include <asm/cacheflush.h>
/*
 * The active set of DMA operations for this platform.  Assigned at boot
 * (see arm64_swiotlb_init() below) and exported so that driver modules
 * can reach it.
 */
29 struct dma_map_ops *dma_ops;
30 EXPORT_SYMBOL(dma_ops);
/*
 * Allocate a DMA-coherent buffer through the SWIOTLB allocator.
 *
 * The mask check steers allocations for devices that cannot address
 * above 32 bits; the statement that actually adjusts @flags for the
 * low zone is not visible in this excerpt.
 *
 * NOTE(review): original lines 35, 38 and 40-41 (opening brace, the
 * GFP adjustment under the mask check, closing brace) are missing here
 * — this source appears truncated; verify against the full file.
 */
32 static void *__dma_alloc_coherent(struct device *dev, size_t size,
33 dma_addr_t *dma_handle, gfp_t flags,
34 struct dma_attrs *attrs)
36 if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
37 dev->coherent_dma_mask <= DMA_BIT_MASK(32))
39 return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
/*
 * Free a buffer obtained from __dma_alloc_coherent().  Thin wrapper
 * around the SWIOTLB coherent free; @attrs is accepted for interface
 * symmetry but unused here.
 *
 * NOTE(review): braces are missing from this excerpt (original lines
 * 45/47) — source appears truncated.
 */
42 static void __dma_free_coherent(struct device *dev, size_t size,
43 void *vaddr, dma_addr_t dma_handle,
44 struct dma_attrs *attrs)
46 swiotlb_free_coherent(dev, size, vaddr, dma_handle);
/*
 * Allocate a buffer for a non-cache-coherent device.
 *
 * Strategy visible in this excerpt:
 *   1. allocate backing memory via __dma_alloc_coherent();
 *   2. flush the kernel linear alias so no dirty lines can be evicted
 *      into the buffer behind the device's back;
 *   3. build a page array covering the allocation and vmap() it with
 *      pgprot_dmacoherent() to obtain a non-cacheable CPU mapping,
 *      which is what gets returned to the caller.
 *
 * NOTE(review): many original lines are missing from this excerpt
 * (local declarations of `order` and `i`, NULL checks after the
 * allocations, the `map[i] = page + i` loop body, kfree() of the page
 * array, the success return, and the error path that frees the
 * coherent buffer).  Do not assume the visible lines are the complete
 * function — confirm against the full file.
 */
49 static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
50 dma_addr_t *dma_handle, gfp_t flags,
51 struct dma_attrs *attrs)
53 struct page *page, **map;
54 void *ptr, *coherent_ptr;
57 size = PAGE_ALIGN(size);
58 order = get_order(size);
60 ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
/* page-pointer array for vmap(); GFP_DMA stripped — the array itself
   has no addressing restriction */
63 map = kmalloc(sizeof(struct page *) << order, flags & ~GFP_DMA);
67 /* remove any dirty cache lines on the kernel alias */
68 __dma_flush_range(ptr, ptr + size);
70 /* create a coherent mapping */
71 page = virt_to_page(ptr);
72 for (i = 0; i < (size >> PAGE_SHIFT); i++)
74 coherent_ptr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
75 pgprot_dmacoherent(pgprot_default));
/* error path: undo the backing allocation (reached on vmap failure
   in the full source) */
83 __dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
/*
 * Free a buffer from __dma_alloc_noncoherent().  @vaddr is the
 * non-cacheable vmap alias handed to the caller; the real backing
 * address is recovered from @dma_handle and released through
 * __dma_free_coherent().
 *
 * NOTE(review): the vunmap(vaddr) call (original line ~95) that tears
 * down the non-cacheable alias is missing from this excerpt — it must
 * be present in the full source or the vmalloc area would leak.
 */
89 static void __dma_free_noncoherent(struct device *dev, size_t size,
90 void *vaddr, dma_addr_t dma_handle,
91 struct dma_attrs *attrs)
93 void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
96 __dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
/*
 * Map a single page for streaming DMA on a non-coherent device:
 * let SWIOTLB establish the mapping (possibly bouncing), then perform
 * CPU cache maintenance on the kernel alias of the mapped area so the
 * device observes up-to-date data.
 *
 * NOTE(review): the `return dev_addr;` and braces are missing from
 * this excerpt — source appears truncated.
 */
99 static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
100 unsigned long offset, size_t size,
101 enum dma_data_direction dir,
102 struct dma_attrs *attrs)
106 dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
107 __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
/*
 * Unmap a single page: cache maintenance first (so DMA_FROM_DEVICE
 * data written by the device becomes visible to the CPU before the
 * bounce buffer, if any, is torn down), then the SWIOTLB unmap.
 * Mirror image of __swiotlb_map_page().
 */
113 static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
114 size_t size, enum dma_data_direction dir,
115 struct dma_attrs *attrs)
117 __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
118 swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
/*
 * Map a scatterlist for a non-coherent device: SWIOTLB maps all
 * entries, then each successfully-mapped entry (note the loop bound is
 * `ret`, not `nelems`) gets CPU cache maintenance on its kernel alias.
 * Returns the number of mapped entries, per the map_sg contract.
 *
 * NOTE(review): declarations of `ret` and `i`, the second argument of
 * the __dma_map_area() call (sg length, dir) and the `return ret;`
 * are missing from this excerpt — source appears truncated.
 */
121 static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
122 int nelems, enum dma_data_direction dir,
123 struct dma_attrs *attrs)
125 struct scatterlist *sg;
128 ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
129 for_each_sg(sgl, sg, ret, i)
130 __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
/*
 * Unmap a scatterlist: per-entry cache maintenance first, then the
 * SWIOTLB unmap — same ordering rationale as __swiotlb_unmap_page().
 *
 * NOTE(review): the `int i;` declaration and the continuation of the
 * __dma_unmap_area() call (original line 146) are missing from this
 * excerpt — source appears truncated.
 */
136 static void __swiotlb_unmap_sg_attrs(struct device *dev,
137 struct scatterlist *sgl, int nelems,
138 enum dma_data_direction dir,
139 struct dma_attrs *attrs)
141 struct scatterlist *sg;
144 for_each_sg(sgl, sg, nelems, i)
145 __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
147 swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
/*
 * Make a DMA region coherent for CPU access after device DMA:
 * cache maintenance on the kernel alias, then the SWIOTLB sync
 * (which copies from the bounce buffer when one is in use).
 */
150 static void __swiotlb_sync_single_for_cpu(struct device *dev,
151 dma_addr_t dev_addr, size_t size,
152 enum dma_data_direction dir)
154 __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
155 swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
/*
 * Hand a DMA region back to the device: SWIOTLB sync first (copies to
 * the bounce buffer when one is in use), then cache maintenance so the
 * device reads what the CPU wrote.  Ordering is the mirror of
 * __swiotlb_sync_single_for_cpu().
 */
158 static void __swiotlb_sync_single_for_device(struct device *dev,
159 dma_addr_t dev_addr, size_t size,
160 enum dma_data_direction dir)
162 swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
163 __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
/*
 * Scatterlist variant of __swiotlb_sync_single_for_cpu(): per-entry
 * cache maintenance, then the SWIOTLB sync.
 *
 * NOTE(review): the `int i;` declaration and the continuation of the
 * __dma_unmap_area() call (original line 175) are missing from this
 * excerpt — source appears truncated.
 */
166 static void __swiotlb_sync_sg_for_cpu(struct device *dev,
167 struct scatterlist *sgl, int nelems,
168 enum dma_data_direction dir)
170 struct scatterlist *sg;
173 for_each_sg(sgl, sg, nelems, i)
174 __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
176 swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
/*
 * Scatterlist variant of __swiotlb_sync_single_for_device(): SWIOTLB
 * sync first, then per-entry cache maintenance.
 *
 * NOTE(review): the `int i;` declaration and the continuation of the
 * __dma_map_area() call (original line 189) are missing from this
 * excerpt — source appears truncated.
 */
179 static void __swiotlb_sync_sg_for_device(struct device *dev,
180 struct scatterlist *sgl, int nelems,
181 enum dma_data_direction dir)
183 struct scatterlist *sg;
186 swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
187 for_each_sg(sgl, sg, nelems, i)
188 __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
/*
 * DMA ops for devices that are NOT cache-coherent with the CPU: every
 * map/unmap/sync path goes through the local wrappers above, which add
 * explicit cache maintenance around the generic SWIOTLB calls.
 */
192 struct dma_map_ops noncoherent_swiotlb_dma_ops = {
193 .alloc = __dma_alloc_noncoherent,
194 .free = __dma_free_noncoherent,
195 .map_page = __swiotlb_map_page,
196 .unmap_page = __swiotlb_unmap_page,
197 .map_sg = __swiotlb_map_sg_attrs,
198 .unmap_sg = __swiotlb_unmap_sg_attrs,
199 .sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
200 .sync_single_for_device = __swiotlb_sync_single_for_device,
201 .sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
202 .sync_sg_for_device = __swiotlb_sync_sg_for_device,
203 .dma_supported = swiotlb_dma_supported,
204 .mapping_error = swiotlb_dma_mapping_error,
206 EXPORT_SYMBOL(noncoherent_swiotlb_dma_ops);
/*
 * DMA ops for cache-coherent devices: no CPU cache maintenance is
 * needed, so the generic SWIOTLB routines are used directly (except
 * alloc/free, which share the local coherent-allocator wrappers).
 */
208 struct dma_map_ops coherent_swiotlb_dma_ops = {
209 .alloc = __dma_alloc_coherent,
210 .free = __dma_free_coherent,
211 .map_page = swiotlb_map_page,
212 .unmap_page = swiotlb_unmap_page,
213 .map_sg = swiotlb_map_sg_attrs,
214 .unmap_sg = swiotlb_unmap_sg_attrs,
215 .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
216 .sync_single_for_device = swiotlb_sync_single_for_device,
217 .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
218 .sync_sg_for_device = swiotlb_sync_sg_for_device,
219 .dma_supported = swiotlb_dma_supported,
220 .mapping_error = swiotlb_dma_mapping_error,
222 EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
/*
 * Boot-time setup: default to the coherent ops.  Platform code is
 * expected to switch to noncoherent_swiotlb_dma_ops where needed.
 *
 * NOTE(review): braces and the swiotlb bounce-buffer initialisation
 * call (original line ~227) are missing from this excerpt — source
 * appears truncated.
 */
224 void __init arm64_swiotlb_init(void)
226 dma_ops = &coherent_swiotlb_dma_ops;
/* Number of dma-debug tracking entries preallocated at boot. */
230 #define PREALLOC_DMA_DEBUG_ENTRIES 4096
/*
 * Initialise the DMA debugging facility.  Registered as an fs_initcall
 * so it runs before most driver probes start issuing DMA mappings.
 *
 * NOTE(review): braces and the `return 0;` are missing from this
 * excerpt — source appears truncated.
 */
232 static int __init dma_debug_do_init(void)
234 dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
237 fs_initcall(dma_debug_do_init);