/*
 * arch/ubicom32/include/asm/dma-mapping.h
 *   Generic dma-mapping.h for Ubicom32 architecture.
 *
 * (C) Copyright 2009, Ubicom, Inc.
 *
 * This file is part of the Ubicom32 Linux Kernel Port.
 *
 * The Ubicom32 Linux Kernel Port is free software: you can redistribute
 * it and/or modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, either version 2 of the
 * License, or (at your option) any later version.
 *
 * The Ubicom32 Linux Kernel Port is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with the Ubicom32 Linux Kernel Port.  If not,
 * see <http://www.gnu.org/licenses/>.
 *
 * Ubicom32 implementation derived from (with many thanks):
 */
#ifndef _ASM_UBICOM32_DMA_MAPPING_H
#define _ASM_UBICOM32_DMA_MAPPING_H

#include <linux/scatterlist.h>
#include <linux/mm.h>		/* need struct page definitions */

/*
 * We implement the API below in terms of the existing PCI one,
 * so include it.
 */
#include <linux/pci.h>
#ifdef CONFIG_PCI

/*
 * PCI-backed implementation of the generic DMA API: every dma_* call is
 * forwarded to the corresponding pci_* helper.  Only devices that live on
 * the PCI bus may use this API here, hence the
 * BUG_ON(dev->bus != &pci_bus_type) guard in each wrapper.
 */

static inline int
dma_supported(struct device *dev, u64 mask)
{
	BUG_ON(dev->bus != &pci_bus_type);

	return pci_dma_supported(to_pci_dev(dev), mask);
}

static inline int
dma_set_mask(struct device *dev, u64 dma_mask)
{
	BUG_ON(dev->bus != &pci_bus_type);

	return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
}

static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t flag)
{
	BUG_ON(dev->bus != &pci_bus_type);

	/* NOTE(review): pci_alloc_consistent ignores 'flag' (always GFP_ATOMIC) */
	return pci_alloc_consistent(to_pci_dev(dev), size, dma_handle);
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t dma_handle)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle);
}

static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	return pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction);
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	return pci_map_page(to_pci_dev(dev), page, offset, size,
			    (int)direction);
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction);
}

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
}

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle,
				    size, (int)direction);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle,
				       size, (int)direction);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, nelems, (int)direction);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, nelems,
				   (int)direction);
}

static inline int
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return pci_dma_mapping_error(to_pci_dev(dev), dma_addr);
}

#endif /* CONFIG_PCI */
177 dma_supported(struct device *dev, u64 mask)
183 dma_set_mask(struct device *dev, u64 dma_mask)
190 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
198 dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
199 dma_addr_t dma_handle)
204 static inline dma_addr_t
205 dma_map_single(struct device *dev, void *cpu_addr, size_t size,
206 enum dma_data_direction direction)
213 dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
214 enum dma_data_direction direction)
219 static inline dma_addr_t
220 dma_map_page(struct device *dev, struct page *page,
221 unsigned long offset, size_t size,
222 enum dma_data_direction direction)
229 dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
230 enum dma_data_direction direction)
236 dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
237 enum dma_data_direction direction)
244 dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
245 enum dma_data_direction direction)
251 dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
252 enum dma_data_direction direction)
258 dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
259 enum dma_data_direction direction)
265 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
266 enum dma_data_direction direction)
272 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
273 enum dma_data_direction direction)
279 dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
286 /* Now for the API extensions over the pci_ one */
288 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
289 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
290 #define dma_is_consistent(d, h) (1)
293 dma_get_cache_alignment(void)
295 /* no easy way to get cache size on all processors, so return
296 * the maximum possible, to be safe */
297 return (1 << INTERNODE_CACHE_SHIFT);
301 dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
302 unsigned long offset, size_t size,
303 enum dma_data_direction direction)
305 /* just sync everything, that's all the pci API can do */
306 dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction);
310 dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
311 unsigned long offset, size_t size,
312 enum dma_data_direction direction)
314 /* just sync everything, that's all the pci API can do */
315 dma_sync_single_for_device(dev, dma_handle, offset+size, direction);
319 dma_cache_sync(struct device *dev, void *vaddr, size_t size,
320 enum dma_data_direction direction)
322 /* could define this in terms of the dma_cache ... operations,
323 * but if you get this on a platform, you should convert the platform
324 * to using the generic device DMA API */
328 #endif /* _ASM_UBICOM32_DMA_MAPPING_H */