microblaze: Cleanup PCI_DRAM_OFFSET handling
arch/microblaze/kernel/dma.c
/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <linux/bug.h>

/* MicroBlaze caches are not DMA coherent, so take the uncached path */
#define NOT_COHERENT_CACHE

static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, gfp_t flag,
                                       struct dma_attrs *attrs)
{
#ifdef NOT_COHERENT_CACHE
        return consistent_alloc(flag, size, dma_handle);
#else
        void *ret;
        struct page *page;
        int node = dev_to_node(dev);

        /* ignore region specifiers */
        flag &= ~(__GFP_HIGHMEM);

        page = alloc_pages_node(node, flag, get_order(size));
        if (page == NULL)
                return NULL;
        ret = page_address(page);
        memset(ret, 0, size);
        *dma_handle = virt_to_phys(ret);

        return ret;
#endif
}

static void dma_direct_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t dma_handle,
                                     struct dma_attrs *attrs)
{
#ifdef NOT_COHERENT_CACHE
        consistent_free(size, vaddr);
#else
        free_pages((unsigned long)vaddr, get_order(size));
#endif
}
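
/*
 * Illustrative sketch, not part of the original file: how a driver's
 * dma_alloc_coherent()/dma_free_coherent() calls reach the two
 * callbacks above.  The function and its "pdev" argument are
 * hypothetical; the block is kept under #if 0 so it is never built.
 */
#if 0
static int example_coherent_usage(struct platform_device *pdev)
{
        void *buf;
        dma_addr_t bus;

        /* routed to dma_direct_alloc_coherent(); on microblaze this takes
         * the consistent_alloc() path because NOT_COHERENT_CACHE is set */
        buf = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &bus, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        /* ... the device DMAs to/from "bus" while the CPU uses "buf" ... */

        dma_free_coherent(&pdev->dev, PAGE_SIZE, buf, bus);
        return 0;
}
#endif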

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
                             int nents, enum dma_data_direction direction,
                             struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        /* FIXME this part of code is untested */
        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = sg_phys(sg);
                __dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
                                                        sg->length, direction);
        }

        return nents;
}
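
/*
 * Illustrative sketch, not part of the original file: driver-side use
 * of the scatter/gather path above.  dma_map_sg() dispatches to
 * dma_direct_map_sg(); the function and its arguments are hypothetical,
 * and the block is kept under #if 0 so it is never built.
 */
#if 0
static int example_sg_usage(struct device *dev, struct scatterlist *sgl,
                            int nents)
{
        struct scatterlist *sg;
        int i, mapped;

        mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
        if (!mapped)
                return -EIO;

        for_each_sg(sgl, sg, mapped, i) {
                /* program sg_dma_address(sg) and sg_dma_len(sg) into
                 * the device's descriptor ring */
        }

        dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
        return 0;
}
#endif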

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
                                int nents, enum dma_data_direction direction,
                                struct dma_attrs *attrs)
{
        /* nothing to do here: this direct implementation keeps no
         * per-mapping state, so sg unmap is a no-op */
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
        return 1;
}

static inline dma_addr_t dma_direct_map_page(struct device *dev,
                                             struct page *page,
                                             unsigned long offset,
                                             size_t size,
                                             enum dma_data_direction direction,
                                             struct dma_attrs *attrs)
{
        __dma_sync(page_to_phys(page) + offset, size, direction);
        return page_to_phys(page) + offset;
}

static inline void dma_direct_unmap_page(struct device *dev,
                                         dma_addr_t dma_address,
                                         size_t size,
                                         enum dma_data_direction direction,
                                         struct dma_attrs *attrs)
{
        /*
         * No extra cache cleanup is necessary beyond the sync below:
         * dma_address is already a physical address, which is exactly
         * what __dma_sync() expects.
         */
        __dma_sync(dma_address, size, direction);
}
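
/*
 * Illustrative sketch, not part of the original file: a streaming
 * mapping as seen from a driver.  dma_map_single() resolves to
 * dma_direct_map_page() above through the generic dma-mapping
 * wrappers; the function and its arguments are hypothetical, and the
 * block is kept under #if 0 so it is never built.
 */
#if 0
static int example_streaming_usage(struct device *dev, void *data,
                                   size_t len)
{
        dma_addr_t handle;

        handle = dma_map_single(dev, data, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, handle))
                return -EIO;

        /* ... hand "handle" to the device and wait for completion ... */

        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
        return 0;
}
#endif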

static inline void
dma_direct_sync_single_for_cpu(struct device *dev,
                               dma_addr_t dma_handle, size_t size,
                               enum dma_data_direction direction)
{
        /*
         * The buffer is being handed back to the CPU, so only a
         * DMA_FROM_DEVICE mapping needs a sync here: stale cache
         * lines must not hide data the device has just written.
         */

        if (direction == DMA_FROM_DEVICE)
                __dma_sync(dma_handle, size, direction);
}

static inline void
dma_direct_sync_single_for_device(struct device *dev,
                                  dma_addr_t dma_handle, size_t size,
                                  enum dma_data_direction direction)
{
        /*
         * The buffer is being handed to the device, so only a
         * DMA_TO_DEVICE mapping needs a sync here: the CPU's writes
         * must be pushed out of the cache before the device reads
         * the region.
         */

        if (direction == DMA_TO_DEVICE)
                __dma_sync(dma_handle, size, direction);
}
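
/*
 * Illustrative sketch, not part of the original file: a driver that
 * inspects a still-mapped DMA_FROM_DEVICE buffer must bracket the CPU
 * access with the two callbacks above, reached via
 * dma_sync_single_for_cpu()/dma_sync_single_for_device().  The
 * function and its arguments are hypothetical; kept under #if 0.
 */
#if 0
static void example_sync_usage(struct device *dev, dma_addr_t handle,
                               size_t len)
{
        dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
        /* the CPU may now safely read the buffer contents */
        dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
}
#endif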

static inline void
dma_direct_sync_sg_for_cpu(struct device *dev,
                           struct scatterlist *sgl, int nents,
                           enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        /* FIXME this part of code is untested */
        if (direction == DMA_FROM_DEVICE)
                for_each_sg(sgl, sg, nents, i)
                        __dma_sync(sg->dma_address, sg->length, direction);
}

static inline void
dma_direct_sync_sg_for_device(struct device *dev,
                              struct scatterlist *sgl, int nents,
                              enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        /* FIXME this part of code is untested */
        if (direction == DMA_TO_DEVICE)
                for_each_sg(sgl, sg, nents, i)
                        __dma_sync(sg->dma_address, sg->length, direction);
}

struct dma_map_ops dma_direct_ops = {
        .alloc          = dma_direct_alloc_coherent,
        .free           = dma_direct_free_coherent,
        .map_sg         = dma_direct_map_sg,
        .unmap_sg       = dma_direct_unmap_sg,
        .dma_supported  = dma_direct_dma_supported,
        .map_page       = dma_direct_map_page,
        .unmap_page     = dma_direct_unmap_page,
        .sync_single_for_cpu            = dma_direct_sync_single_for_cpu,
        .sync_single_for_device         = dma_direct_sync_single_for_device,
        .sync_sg_for_cpu                = dma_direct_sync_sg_for_cpu,
        .sync_sg_for_device             = dma_direct_sync_sg_for_device,
};
EXPORT_SYMBOL(dma_direct_ops);
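
/*
 * Illustrative sketch, not part of this file: the generic dma-mapping
 * wrappers look the ops table up via get_dma_ops() and dispatch
 * through it, so dma_map_single() reduces to roughly the call below.
 * The function and its arguments are hypothetical; kept under #if 0.
 */
#if 0
static dma_addr_t example_dispatch(struct device *dev, void *ptr,
                                   size_t size, enum dma_data_direction dir)
{
        struct dma_map_ops *ops = get_dma_ops(dev);

        return ops->map_page(dev, virt_to_page(ptr), offset_in_page(ptr),
                             size, dir, NULL);
}
#endif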

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

        return 0;
}
fs_initcall(dma_init);