/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <asm/vio.h>
#include <asm/bug.h>
#include <asm/abs_addr.h>
#include <asm/machdep.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */

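/*
 * Illustrative sketch, not part of the original file: a platform whose
 * devices see system RAM starting at bus address 0x80000000 could point a
 * device at this implementation at setup time roughly like so.  The
 * 0x80000000 value and the pdev variable are made-up examples;
 * set_dma_offset() and set_dma_ops() are the helpers from
 * <asm/dma-mapping.h>.
 *
 *	set_dma_offset(&pdev->dev, 0x80000000ull);
 *	set_dma_ops(&pdev->dev, &dma_direct_ops);
 */
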
void *dma_direct_alloc_coherent(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t flag,
                                struct dma_attrs *attrs)
{
        void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
        ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
        if (ret == NULL)
                return NULL;
        *dma_handle += get_dma_offset(dev);
        return ret;
#else
        struct page *page;
        int node = dev_to_node(dev);

        /* ignore region specifiers */
        flag &= ~(__GFP_HIGHMEM);

        page = alloc_pages_node(node, flag, get_order(size));
        if (page == NULL)
                return NULL;
        ret = page_address(page);
        memset(ret, 0, size);
        *dma_handle = virt_to_abs(ret) + get_dma_offset(dev);
        return ret;
#endif
}

void dma_direct_free_coherent(struct device *dev, size_t size,
                              void *vaddr, dma_addr_t dma_handle,
                              struct dma_attrs *attrs)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
        __dma_free_coherent(size, vaddr);
#else
        free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
                             int nents, enum dma_data_direction direction,
                             struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
                sg->dma_length = sg->length;
                __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
        }

        return nents;
}

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
                                int nents, enum dma_data_direction direction,
                                struct dma_attrs *attrs)
{
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
        /* Could be improved so platforms can set the limit in case
         * they have limited DMA windows
         */
        return mask >= get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);
#else
        return 1;
#endif
}

static u64 dma_direct_get_required_mask(struct device *dev)
{
        u64 end, mask;

        end = memblock_end_of_DRAM() + get_dma_offset(dev);

        mask = 1ULL << (fls64(end) - 1);
        mask += mask - 1;

        return mask;
}

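/*
 * Worked example for dma_direct_get_required_mask() above (illustrative
 * numbers, not from the original source): with 4 GiB of RAM and a zero DMA
 * offset, end = 0x100000000 and fls64(end) = 33, so the intermediate mask
 * is 1ULL << 32 and the returned mask is 0x1ffffffff, i.e. every address
 * bit needed to reach the last byte of DRAM as the device sees it.
 */
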
static inline dma_addr_t dma_direct_map_page(struct device *dev,
                                             struct page *page,
                                             unsigned long offset,
                                             size_t size,
                                             enum dma_data_direction dir,
                                             struct dma_attrs *attrs)
{
        BUG_ON(dir == DMA_NONE);
        __dma_sync_page(page, offset, size, dir);
        return page_to_phys(page) + offset + get_dma_offset(dev);
}

static inline void dma_direct_unmap_page(struct device *dev,
                                         dma_addr_t dma_address,
                                         size_t size,
                                         enum dma_data_direction direction,
                                         struct dma_attrs *attrs)
{
}

#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
                struct scatterlist *sgl, int nents,
                enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i)
                __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_direct_sync_single(struct device *dev,
                                          dma_addr_t dma_handle, size_t size,
                                          enum dma_data_direction direction)
{
        __dma_sync(bus_to_virt(dma_handle), size, direction);
}
#endif

struct dma_map_ops dma_direct_ops = {
        .alloc                  = dma_direct_alloc_coherent,
        .free                   = dma_direct_free_coherent,
        .map_sg                 = dma_direct_map_sg,
        .unmap_sg               = dma_direct_unmap_sg,
        .dma_supported          = dma_direct_dma_supported,
        .map_page               = dma_direct_map_page,
        .unmap_page             = dma_direct_unmap_page,
        .get_required_mask      = dma_direct_get_required_mask,
#ifdef CONFIG_NOT_COHERENT_CACHE
        .sync_single_for_cpu    = dma_direct_sync_single,
        .sync_single_for_device = dma_direct_sync_single,
        .sync_sg_for_cpu        = dma_direct_sync_sg,
        .sync_sg_for_device     = dma_direct_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_direct_ops);

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

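/*
 * Typical driver-side usage of dma_set_mask() below (a hedged sketch, not
 * code from this file; "pdev" is a hypothetical PCI device): a driver whose
 * hardware can only generate 32-bit addresses would do
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 *
 * in its probe routine, before performing any mappings.
 */
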
int dma_set_mask(struct device *dev, u64 dma_mask)
{
        struct dma_map_ops *dma_ops = get_dma_ops(dev);

        if (ppc_md.dma_set_mask)
                return ppc_md.dma_set_mask(dev, dma_mask);
        if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
                return dma_ops->set_dma_mask(dev, dma_mask);
        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;
        *dev->dma_mask = dma_mask;
        return 0;
}
EXPORT_SYMBOL(dma_set_mask);

u64 dma_get_required_mask(struct device *dev)
{
        struct dma_map_ops *dma_ops = get_dma_ops(dev);

        if (ppc_md.dma_get_required_mask)
                return ppc_md.dma_get_required_mask(dev);

        if (unlikely(dma_ops == NULL))
                return 0;

        if (dma_ops->get_required_mask)
                return dma_ops->get_required_mask(dev);

        return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

static int __init dma_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
#ifdef CONFIG_PCI
        dma_debug_add_bus(&pci_bus_type);
#endif
#ifdef CONFIG_IBMVIO
        dma_debug_add_bus(&vio_bus_type);
#endif

        return 0;
}
fs_initcall(dma_init);

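/*
 * Note, not from the original file: the generic dma-debug facility set up
 * above can be tuned from the kernel command line, e.g. "dma_debug=off" to
 * disable it or "dma_debug_entries=<n>" to override the preallocation
 * count, assuming CONFIG_DMA_API_DEBUG is enabled.
 */
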
int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
                      void *cpu_addr, dma_addr_t handle, size_t size)
{
        unsigned long pfn;

#ifdef CONFIG_NOT_COHERENT_CACHE
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
#else
        pfn = page_to_pfn(virt_to_page(cpu_addr));
#endif
        return remap_pfn_range(vma, vma->vm_start,
                               pfn + vma->vm_pgoff,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}
EXPORT_SYMBOL_GPL(dma_mmap_coherent);

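/*
 * Illustrative usage of dma_mmap_coherent() (a sketch, not from this file;
 * "buf", "handle" and the surrounding handler are hypothetical): a driver
 * exposing a coherent buffer to user space from its ->mmap() file operation
 * might do
 *
 *	buf = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
 *	...
 *	return dma_mmap_coherent(dev, vma, buf, handle, size);
 *
 * so that the cache attributes chosen above apply to the user mapping too.
 */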