[firefly-linux-kernel-4.4.55.git] / arch / powerpc / kernel / dma.c
/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <asm/vio.h>
#include <asm/bug.h>
#include <asm/abs_addr.h>
#include <asm/machdep.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */

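/*
 * Illustrative note (not part of the original comment): the offset is
 * normally installed per device with the powerpc set_dma_offset() helper,
 * e.g. set_dma_offset(dev, PCI_DRAM_OFFSET), and is read back below
 * through get_dma_offset().
 */

/*
 * Allocate a coherent buffer. On cache-coherent platforms this grabs
 * zeroed pages on the device's NUMA node and returns their kernel virtual
 * address, with *dma_handle set to the physical address plus the
 * per-device DMA offset. With CONFIG_NOT_COHERENT_CACHE the allocation is
 * delegated to __dma_alloc_coherent(), which provides a non-cached
 * mapping, and only the offset is applied here.
 */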
void *dma_direct_alloc_coherent(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t flag,
                                struct dma_attrs *attrs)
{
        void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
        ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
        if (ret == NULL)
                return NULL;
        *dma_handle += get_dma_offset(dev);
        return ret;
#else
        struct page *page;
        int node = dev_to_node(dev);

        /* ignore region specifiers */
        flag &= ~(__GFP_HIGHMEM);

        page = alloc_pages_node(node, flag, get_order(size));
        if (page == NULL)
                return NULL;
        ret = page_address(page);
        memset(ret, 0, size);
        *dma_handle = virt_to_abs(ret) + get_dma_offset(dev);

        return ret;
#endif
}

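/*
 * Free a buffer obtained from dma_direct_alloc_coherent() above: hand the
 * non-cached mapping back to __dma_free_coherent() on non-coherent
 * platforms, otherwise simply release the pages.
 */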
void dma_direct_free_coherent(struct device *dev, size_t size,
                              void *vaddr, dma_addr_t dma_handle,
                              struct dma_attrs *attrs)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
        __dma_free_coherent(size, vaddr);
#else
        free_pages((unsigned long)vaddr, get_order(size));
#endif
}

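/*
 * Map a coherent buffer into user space via remap_pfn_range(). On
 * non-coherent platforms the user mapping is made non-cached and the pfn
 * is recovered from the dedicated coherent mapping; otherwise it comes
 * straight from the kernel virtual address.
 */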
int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
                             void *cpu_addr, dma_addr_t handle, size_t size,
                             struct dma_attrs *attrs)
{
        unsigned long pfn;

#ifdef CONFIG_NOT_COHERENT_CACHE
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
#else
        pfn = page_to_pfn(virt_to_page(cpu_addr));
#endif
        return remap_pfn_range(vma, vma->vm_start,
                               pfn + vma->vm_pgoff,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}

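/*
 * Map a scatterlist for DMA: each entry's bus address is simply its
 * physical address plus the per-device offset. __dma_sync_page() performs
 * the cache maintenance required for the transfer direction and compiles
 * away on cache-coherent platforms.
 */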
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
                             int nents, enum dma_data_direction direction,
                             struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
                sg->dma_length = sg->length;
                __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
        }

        return nents;
}

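/* Nothing to tear down for a direct scatterlist mapping. */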
static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
                                int nents, enum dma_data_direction direction,
                                struct dma_attrs *attrs)
{
}

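/*
 * A mask is acceptable on 64-bit if it covers the highest bus address the
 * device could be handed, i.e. the last byte of RAM plus the per-device
 * offset; 32-bit platforms accept any mask.
 */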
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
        /* Could be improved so platforms can set the limit in case
         * they have limited DMA windows
         */
        return mask >= get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);
#else
        return 1;
#endif
}

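/*
 * Report the smallest all-ones mask covering the highest possible bus
 * address. Worked example (illustrative numbers): with 2GB of RAM and no
 * offset, end = 0x80000000, fls64(end) = 32, so mask = 0x80000000 and the
 * final result is 0xffffffff, i.e. a 32-bit mask.
 */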
static u64 dma_direct_get_required_mask(struct device *dev)
{
        u64 end, mask;

        end = memblock_end_of_DRAM() + get_dma_offset(dev);

        mask = 1ULL << (fls64(end) - 1);
        mask += mask - 1;

        return mask;
}

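/*
 * Map a single page: sync the cache if needed, then return the page's
 * physical address plus the intra-page offset and the per-device DMA
 * offset. Unmapping has nothing to undo.
 */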
static inline dma_addr_t dma_direct_map_page(struct device *dev,
                                             struct page *page,
                                             unsigned long offset,
                                             size_t size,
                                             enum dma_data_direction dir,
                                             struct dma_attrs *attrs)
{
        BUG_ON(dir == DMA_NONE);
        __dma_sync_page(page, offset, size, dir);
        return page_to_phys(page) + offset + get_dma_offset(dev);
}

static inline void dma_direct_unmap_page(struct device *dev,
                                         dma_addr_t dma_address,
                                         size_t size,
                                         enum dma_data_direction direction,
                                         struct dma_attrs *attrs)
{
}

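/*
 * Cache maintenance hooks, only needed when the cache is not DMA-coherent:
 * flush or invalidate the lines covering the buffer around each transfer.
 */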
#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
                struct scatterlist *sgl, int nents,
                enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i)
                __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_direct_sync_single(struct device *dev,
                                          dma_addr_t dma_handle, size_t size,
                                          enum dma_data_direction direction)
{
        __dma_sync(bus_to_virt(dma_handle), size, direction);
}
#endif

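/*
 * The exported dma_map_ops used for directly mapped buses. Illustrative
 * note (not shown in this file): platform or bus setup code would
 * typically install it on a device with set_dma_ops(dev, &dma_direct_ops).
 */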
struct dma_map_ops dma_direct_ops = {
        .alloc                          = dma_direct_alloc_coherent,
        .free                           = dma_direct_free_coherent,
        .mmap                           = dma_direct_mmap_coherent,
        .map_sg                         = dma_direct_map_sg,
        .unmap_sg                       = dma_direct_unmap_sg,
        .dma_supported                  = dma_direct_dma_supported,
        .map_page                       = dma_direct_map_page,
        .unmap_page                     = dma_direct_unmap_page,
        .get_required_mask              = dma_direct_get_required_mask,
#ifdef CONFIG_NOT_COHERENT_CACHE
        .sync_single_for_cpu            = dma_direct_sync_single,
        .sync_single_for_device         = dma_direct_sync_single,
        .sync_sg_for_cpu                = dma_direct_sync_sg,
        .sync_sg_for_device             = dma_direct_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_direct_ops);

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

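/*
 * Arch implementation of dma_set_mask(): give the platform
 * (ppc_md.dma_set_mask) and then the device's dma_map_ops a chance to
 * override the generic behaviour; otherwise validate the mask with
 * dma_supported() and store it.
 */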
int dma_set_mask(struct device *dev, u64 dma_mask)
{
        struct dma_map_ops *dma_ops = get_dma_ops(dev);

        if (ppc_md.dma_set_mask)
                return ppc_md.dma_set_mask(dev, dma_mask);
        if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
                return dma_ops->set_dma_mask(dev, dma_mask);
        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;
        *dev->dma_mask = dma_mask;
        return 0;
}
EXPORT_SYMBOL(dma_set_mask);

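/*
 * Arch implementation of dma_get_required_mask(): same dispatch order as
 * above, falling back to a mask spanning the whole of dma_addr_t.
 */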
u64 dma_get_required_mask(struct device *dev)
{
        struct dma_map_ops *dma_ops = get_dma_ops(dev);

        if (ppc_md.dma_get_required_mask)
                return ppc_md.dma_get_required_mask(dev);

        if (unlikely(dma_ops == NULL))
                return 0;

        if (dma_ops->get_required_mask)
                return dma_ops->get_required_mask(dev);

        return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

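/*
 * Set up DMA debugging at fs_initcall time: preallocate debug entries and
 * register the PCI and VIO bus types with dma-debug.
 */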
static int __init dma_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
#ifdef CONFIG_PCI
        dma_debug_add_bus(&pci_bus_type);
#endif
#ifdef CONFIG_IBMVIO
        dma_debug_add_bus(&vio_bus_type);
#endif

        return 0;
}
fs_initcall(dma_init);