gpu: ion: ion_system_heap: Fix bug preventing compilation
[firefly-linux-kernel-4.4.55.git] drivers/staging/android/ion/ion_system_heap.c
/*
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

struct page_info {
        struct page *page;
        unsigned long order;
        struct list_head list;
};

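/*
 * Chunk orders to try, largest first.  With 4 KB pages these correspond
 * to 1 MB (order 8), 64 KB (order 4) and 4 KB (order 0) allocations.
 */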
static unsigned int orders[] = {8, 4, 0};

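/*
 * Allocate the largest chunk from orders[] that both fits in the
 * remaining size and does not exceed max_order, splitting the
 * higher-order page into individual pages when the buffer will be
 * faulted into userspace.  Returns NULL if no order can be satisfied.
 */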
static struct page_info *alloc_largest_available(unsigned long size,
                                                 bool split_pages,
                                                 unsigned int max_order)
{
        struct page *page;
        struct page_info *info;
        int i;

        for (i = 0; i < ARRAY_SIZE(orders); i++) {
                if (size < (1 << orders[i]) * PAGE_SIZE)
                        continue;
                if (max_order < orders[i])
                        continue;
                page = alloc_pages(GFP_HIGHUSER | __GFP_ZERO |
                                   __GFP_NOWARN | __GFP_NORETRY, orders[i]);
                if (!page)
                        continue;
                if (split_pages)
                        split_page(page, orders[i]);
                info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
                info->page = page;
                info->order = orders[i];
                return info;
        }
        return NULL;
}

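/*
 * Satisfy the request with a list of mixed-order chunks, largest first,
 * then flatten the list into an sg_table kept in buffer->priv_virt.
 */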
static int ion_system_heap_allocate(struct ion_heap *heap,
                                     struct ion_buffer *buffer,
                                     unsigned long size, unsigned long align,
                                     unsigned long flags)
{
        struct sg_table *table;
        struct scatterlist *sg;
        int ret;
        struct list_head pages;
        struct page_info *info, *tmp_info;
        int i = 0;
        long size_remaining = PAGE_ALIGN(size);
        bool split_pages = ion_buffer_fault_user_mappings(buffer);
        unsigned int max_order = orders[0];

        INIT_LIST_HEAD(&pages);
        while (size_remaining > 0) {
                info = alloc_largest_available(size_remaining, split_pages,
                                               max_order);
                if (!info)
                        goto err;
                list_add_tail(&info->list, &pages);
                size_remaining -= (1 << info->order) * PAGE_SIZE;
                max_order = info->order;
                i++;
        }

        table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!table)
                goto err;

        if (split_pages)
                ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
                                     GFP_KERNEL);
        else
                ret = sg_alloc_table(table, i, GFP_KERNEL);

        if (ret)
                goto err1;

        sg = table->sgl;
        list_for_each_entry_safe(info, tmp_info, &pages, list) {
                struct page *page = info->page;

                if (split_pages) {
                        for (i = 0; i < (1 << info->order); i++) {
                                sg_set_page(sg, page + i, PAGE_SIZE, 0);
                                sg = sg_next(sg);
                        }
                } else {
                        sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE,
                                    0);
                        sg = sg_next(sg);
                }
                list_del(&info->list);
                kfree(info);
        }

        dma_sync_sg_for_device(NULL, table->sgl, table->nents,
                               DMA_BIDIRECTIONAL);

        buffer->priv_virt = table;
        return 0;
err1:
        kfree(table);
err:
        list_for_each_entry_safe(info, tmp_info, &pages, list) {
                if (split_pages)
                        for (i = 0; i < (1 << info->order); i++)
                                __free_page(info->page + i);
                else
                        __free_pages(info->page, info->order);

                kfree(info);
        }
        return -ENOMEM;
}

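/*
 * Release every chunk referenced by the buffer's sg_table, then free
 * the table itself.
 */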
void ion_system_heap_free(struct ion_buffer *buffer)
{
        int i;
        struct scatterlist *sg;
        struct sg_table *table = buffer->priv_virt;

        for_each_sg(table->sgl, sg, table->nents, i)
                __free_pages(sg_page(sg), get_order(sg_dma_len(sg)));
        if (buffer->sg_table)
                sg_free_table(buffer->sg_table);
        kfree(buffer->sg_table);
}

struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
                                         struct ion_buffer *buffer)
{
        return buffer->priv_virt;
}

void ion_system_heap_unmap_dma(struct ion_heap *heap,
                               struct ion_buffer *buffer)
{
        return;
}

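/*
 * vmap() all pages of the buffer into one contiguous kernel mapping,
 * cached or write-combined depending on ION_FLAG_CACHED.
 */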
void *ion_system_heap_map_kernel(struct ion_heap *heap,
                                 struct ion_buffer *buffer)
{
        struct scatterlist *sg;
        int i, j;
        void *vaddr;
        pgprot_t pgprot;
        struct sg_table *table = buffer->priv_virt;
        int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
        struct page **pages = kzalloc(sizeof(struct page *) * npages,
                                      GFP_KERNEL);
        struct page **tmp = pages;

        if (!pages)
                return ERR_PTR(-ENOMEM);

        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
        else
                pgprot = pgprot_writecombine(PAGE_KERNEL);

        for_each_sg(table->sgl, sg, table->nents, i) {
                int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
                struct page *page = sg_page(sg);

                BUG_ON(i >= npages);
                for (j = 0; j < npages_this_entry; j++)
                        *(tmp++) = page++;
        }
        vaddr = vmap(pages, npages, VM_MAP, pgprot);
        kfree(pages);

        return vaddr;
}

void ion_system_heap_unmap_kernel(struct ion_heap *heap,
                                  struct ion_buffer *buffer)
{
        vunmap(buffer->vaddr);
}

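/*
 * Map each scatterlist chunk into the vma with remap_pfn_range(),
 * skipping whole chunks to honour vma->vm_pgoff.
 */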
int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                             struct vm_area_struct *vma)
{
        struct sg_table *table = buffer->priv_virt;
        unsigned long addr = vma->vm_start;
        unsigned long offset = vma->vm_pgoff;
        struct scatterlist *sg;
        int i;

        for_each_sg(table->sgl, sg, table->nents, i) {
                if (offset) {
                        offset--;
                        continue;
                }
                remap_pfn_range(vma, addr, page_to_pfn(sg_page(sg)),
                                sg_dma_len(sg), vma->vm_page_prot);
                addr += sg_dma_len(sg);
                if (addr >= vma->vm_end)
                        return 0;
        }
        return 0;
}

static struct ion_heap_ops vmalloc_ops = {
        .allocate = ion_system_heap_allocate,
        .free = ion_system_heap_free,
        .map_dma = ion_system_heap_map_dma,
        .unmap_dma = ion_system_heap_unmap_dma,
        .map_kernel = ion_system_heap_map_kernel,
        .unmap_kernel = ion_system_heap_unmap_kernel,
        .map_user = ion_system_heap_map_user,
};

struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
        struct ion_heap *heap;

        heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
        if (!heap)
                return ERR_PTR(-ENOMEM);
        heap->ops = &vmalloc_ops;
        heap->type = ION_HEAP_TYPE_SYSTEM;
        return heap;
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
        kfree(heap);
}

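/*
 * The contig heap backs each buffer with a single physically contiguous
 * kzalloc() region.
 */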
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
                                           struct ion_buffer *buffer,
                                           unsigned long len,
                                           unsigned long align,
                                           unsigned long flags)
{
        buffer->priv_virt = kzalloc(len, GFP_KERNEL);
        if (!buffer->priv_virt)
                return -ENOMEM;
        return 0;
}

void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
        kfree(buffer->priv_virt);
}

static int ion_system_contig_heap_phys(struct ion_heap *heap,
                                       struct ion_buffer *buffer,
                                       ion_phys_addr_t *addr, size_t *len)
{
        *addr = virt_to_phys(buffer->priv_virt);
        *len = buffer->size;
        return 0;
}

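/*
 * Describe the contiguous allocation with a single-entry sg_table.
 */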
struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
                                                struct ion_buffer *buffer)
{
        struct sg_table *table;
        int ret;

        table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!table)
                return ERR_PTR(-ENOMEM);
        ret = sg_alloc_table(table, 1, GFP_KERNEL);
        if (ret) {
                kfree(table);
                return ERR_PTR(ret);
        }
        sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
                    0);
        return table;
}

void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
                                      struct ion_buffer *buffer)
{
        sg_free_table(buffer->sg_table);
        kfree(buffer->sg_table);
}

int ion_system_contig_heap_map_user(struct ion_heap *heap,
                                    struct ion_buffer *buffer,
                                    struct vm_area_struct *vma)
{
        unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));

        return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}

static struct ion_heap_ops kmalloc_ops = {
        .allocate = ion_system_contig_heap_allocate,
        .free = ion_system_contig_heap_free,
        .phys = ion_system_contig_heap_phys,
        .map_dma = ion_system_contig_heap_map_dma,
        .unmap_dma = ion_system_contig_heap_unmap_dma,
        .map_kernel = ion_system_heap_map_kernel,
        .unmap_kernel = ion_system_heap_unmap_kernel,
        .map_user = ion_system_contig_heap_map_user,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
{
        struct ion_heap *heap;

        heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
        if (!heap)
                return ERR_PTR(-ENOMEM);
        heap->ops = &kmalloc_ops;
        heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
        return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
        kfree(heap);
}