gpu: ion: Fix bug in ion_system_heap map_user
firefly-linux-kernel-4.4.55.git: drivers/staging/android/ion/ion_system_heap.c
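As the code below shows, map_user now honours vma->vm_pgoff when walking the
buffer's scatterlist: entries that lie entirely before the requested offset
are skipped, the first straddling entry is entered partway in, and each
remap_pfn_range() is clamped to the space remaining in the vma, so a partial
or offset mmap maps exactly the requested pages and never runs past vm_end.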
/*
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

static unsigned int high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
                                            __GFP_NOWARN | __GFP_NORETRY |
                                            __GFP_NO_KSWAPD) & ~__GFP_WAIT;
static unsigned int low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO |
                                            __GFP_NOWARN);
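/*
 * Buffers are assembled from chunks at these orders, largest first:
 * 1MB, 64KB and 4KB chunks (sizes assume 4K pages).  Only orders
 * above 4 use the opportunistic gfp flags (non-blocking, no kswapd
 * wakeup, no warning or retry on failure), so a large allocation
 * quietly falls back to smaller chunks under memory pressure.
 */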
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);

static int order_to_index(unsigned int order)
{
        int i;

        for (i = 0; i < num_orders; i++)
                if (order == orders[i])
                        return i;
        BUG();
        return -1;
}

static unsigned int order_to_size(int order)
{
        return PAGE_SIZE << order;
}

struct ion_system_heap {
        struct ion_heap heap;
        struct ion_page_pool **pools;
};

struct page_info {
        struct page *page;
        unsigned int order;
        struct list_head list;
};

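/*
 * Get one chunk of the requested order.  Uncached buffers come from
 * the per-order page pool; cached buffers are allocated fresh and
 * flushed out of the CPU cache (__dma_page_cpu_to_dev is, to my
 * reading, the ARM-internal helper this tree uses for that).  When
 * the buffer will be faulted in page by page, the higher-order
 * allocation is split so pages can be mapped and freed individually.
 */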
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
                                      struct ion_buffer *buffer,
                                      unsigned long order)
{
        bool cached = ion_buffer_cached(buffer);
        bool split_pages = ion_buffer_fault_user_mappings(buffer);
        struct ion_page_pool *pool = heap->pools[order_to_index(order)];
        struct page *page;

        if (!cached) {
                page = ion_page_pool_alloc(pool);
        } else {
                gfp_t gfp_flags = low_order_gfp_flags;

                if (order > 4)
                        gfp_flags = high_order_gfp_flags;
                page = alloc_pages(gfp_flags, order);
                if (!page)
                        return NULL;
                __dma_page_cpu_to_dev(page, 0, PAGE_SIZE << order,
                                      DMA_BIDIRECTIONAL);
        }
        if (!page)
                return NULL;

        if (split_pages)
                split_page(page, order);
        return page;
}

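/*
 * Return one chunk.  Uncached pages are scrubbed page by page through
 * a writecombined vmap before going back to the pool, so the zeroing
 * itself does not drag them back into the CPU cache.
 */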
static void free_buffer_page(struct ion_system_heap *heap,
                             struct ion_buffer *buffer, struct page *page,
                             unsigned int order)
{
        bool cached = ion_buffer_cached(buffer);
        bool split_pages = ion_buffer_fault_user_mappings(buffer);
        int i;

        if (!cached) {
                struct ion_page_pool *pool = heap->pools[order_to_index(order)];
                /*
                 * Zero the pages before returning them to the pool for
                 * security.  This uses vmap as we want to set the pgprot
                 * so the writes occur to noncached mappings, as the
                 * pool's purpose is to keep the pages out of the cache.
                 * Note the loop runs once per page in the chunk, i.e.
                 * 1 << order times; the old bound of order / PAGE_SIZE
                 * evaluated to zero for every order in use, so nothing
                 * was ever zeroed.
                 */
                for (i = 0; i < (1 << order); i++) {
                        struct page *sub_page = page + i;
                        void *addr = vmap(&sub_page, 1, VM_MAP,
                                          pgprot_writecombine(PAGE_KERNEL));
                        memset(addr, 0, PAGE_SIZE);
                        vunmap(addr);
                }
                ion_page_pool_free(pool, page);
        } else if (split_pages) {
                for (i = 0; i < (1 << order); i++)
                        __free_page(page + i);
        } else {
                __free_pages(page, order);
        }
}

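/*
 * Walk the order list from largest to smallest and hand back the first
 * chunk that both fits in what is left of the buffer and can actually
 * be allocated.  The caller passes back the last order that succeeded
 * as max_order, so later iterations never retry a larger one.
 */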
static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
                                                 struct ion_buffer *buffer,
                                                 unsigned long size,
                                                 unsigned int max_order)
{
        struct page *page;
        struct page_info *info;
        int i;

        for (i = 0; i < num_orders; i++) {
                if (size < order_to_size(orders[i]))
                        continue;
                if (max_order < orders[i])
                        continue;

                page = alloc_buffer_page(heap, buffer, orders[i]);
                if (!page)
                        continue;

                info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
                if (!info) {
                        free_buffer_page(heap, buffer, page, orders[i]);
                        return NULL;
                }
                info->page = page;
                info->order = orders[i];
                return info;
        }

        return NULL;
}

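/*
 * Build the buffer as a list of variable-order chunks, then flatten
 * the list into an sg_table: one sg entry per chunk normally, or one
 * entry per page when the buffer will be faulted in page by page.
 */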
static int ion_system_heap_allocate(struct ion_heap *heap,
                                    struct ion_buffer *buffer,
                                    unsigned long size, unsigned long align,
                                    unsigned long flags)
{
        struct ion_system_heap *sys_heap = container_of(heap,
                                                        struct ion_system_heap,
                                                        heap);
        struct sg_table *table;
        struct scatterlist *sg;
        int ret;
        struct list_head pages;
        struct page_info *info, *tmp_info;
        int i = 0;
        long size_remaining = PAGE_ALIGN(size);
        unsigned int max_order = orders[0];
        bool split_pages = ion_buffer_fault_user_mappings(buffer);

        INIT_LIST_HEAD(&pages);
        while (size_remaining > 0) {
                info = alloc_largest_available(sys_heap, buffer,
                                               size_remaining, max_order);
                if (!info)
                        goto err;
                list_add_tail(&info->list, &pages);
                size_remaining -= (1 << info->order) * PAGE_SIZE;
                max_order = info->order;
                i++;
        }

        table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!table)
                goto err;

        if (split_pages)
                ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
                                     GFP_KERNEL);
        else
                ret = sg_alloc_table(table, i, GFP_KERNEL);

        if (ret)
                goto err1;

        sg = table->sgl;
        list_for_each_entry_safe(info, tmp_info, &pages, list) {
                struct page *page = info->page;

                if (split_pages) {
                        for (i = 0; i < (1 << info->order); i++) {
                                sg_set_page(sg, page + i, PAGE_SIZE, 0);
                                sg = sg_next(sg);
                        }
                } else {
                        sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE,
                                    0);
                        sg = sg_next(sg);
                }
                list_del(&info->list);
                kfree(info);
        }

        buffer->priv_virt = table;
        return 0;
err1:
        kfree(table);
err:
        /* must use the _safe iterator: each node is freed as we walk */
        list_for_each_entry_safe(info, tmp_info, &pages, list) {
                free_buffer_page(sys_heap, buffer, info->page, info->order);
                list_del(&info->list);
                kfree(info);
        }
        return -ENOMEM;
}

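/*
 * Free path: each sg entry is one chunk, so its length recovers the
 * order it was allocated with and routes it back to the right pool
 * (this assumes sg_dma_len() mirrors the entry length in this tree).
 */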
void ion_system_heap_free(struct ion_buffer *buffer)
{
        struct ion_heap *heap = buffer->heap;
        struct ion_system_heap *sys_heap = container_of(heap,
                                                        struct ion_system_heap,
                                                        heap);
        struct sg_table *table = buffer->priv_virt;
        struct scatterlist *sg;
        int i;

        for_each_sg(table->sgl, sg, table->nents, i)
                free_buffer_page(sys_heap, buffer, sg_page(sg),
                                 get_order(sg_dma_len(sg)));
        sg_free_table(table);
        kfree(table);
}

struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
                                         struct ion_buffer *buffer)
{
        return buffer->priv_virt;
}

void ion_system_heap_unmap_dma(struct ion_heap *heap,
                               struct ion_buffer *buffer)
{
}

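/*
 * Kernel mapping: collect every page behind the sg_table into an
 * array and vmap them as one contiguous range, writecombined unless
 * the buffer was allocated cached.
 */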
void *ion_system_heap_map_kernel(struct ion_heap *heap,
                                 struct ion_buffer *buffer)
{
        struct scatterlist *sg;
        int i, j;
        void *vaddr;
        pgprot_t pgprot;
        struct sg_table *table = buffer->priv_virt;
        int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
        struct page **pages = vmalloc(sizeof(struct page *) * npages);
        struct page **tmp = pages;

        if (!pages)
                return NULL;

        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
        else
                pgprot = pgprot_writecombine(PAGE_KERNEL);

        for_each_sg(table->sgl, sg, table->nents, i) {
                int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
                struct page *page = sg_page(sg);

                BUG_ON(i >= npages);
                for (j = 0; j < npages_this_entry; j++)
                        *(tmp++) = page++;
        }
        vaddr = vmap(pages, npages, VM_MAP, pgprot);
        vfree(pages);

        return vaddr;
}

void ion_system_heap_unmap_kernel(struct ion_heap *heap,
                                  struct ion_buffer *buffer)
{
        vunmap(buffer->vaddr);
}

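/*
 * Userspace mapping; this is the path the patch title refers to.
 * Honour vma->vm_pgoff by skipping whole sg entries that lie before
 * the requested offset, starting partway into the first entry that
 * straddles it, and clamping every chunk to the space left in the
 * vma so a short mmap never maps pages past vm_end.
 */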
int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                             struct vm_area_struct *vma)
{
        struct sg_table *table = buffer->priv_virt;
        unsigned long addr = vma->vm_start;
        unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
        struct scatterlist *sg;
        int i;

        for_each_sg(table->sgl, sg, table->nents, i) {
                struct page *page = sg_page(sg);
                unsigned long remainder = vma->vm_end - addr;
                unsigned long len = sg_dma_len(sg);
                int ret;

                if (offset >= sg_dma_len(sg)) {
                        offset -= sg_dma_len(sg);
                        continue;
                } else if (offset) {
                        page += offset / PAGE_SIZE;
                        len = sg_dma_len(sg) - offset;
                        offset = 0;
                }
                len = min(len, remainder);
                ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
                                      vma->vm_page_prot);
                if (ret)
                        return ret;
                addr += len;
                if (addr >= vma->vm_end)
                        return 0;
        }
        return 0;
}

static struct ion_heap_ops system_heap_ops = {
        .allocate = ion_system_heap_allocate,
        .free = ion_system_heap_free,
        .map_dma = ion_system_heap_map_dma,
        .unmap_dma = ion_system_heap_unmap_dma,
        .map_kernel = ion_system_heap_map_kernel,
        .unmap_kernel = ion_system_heap_unmap_kernel,
        .map_user = ion_system_heap_map_user,
};

static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
                                      void *unused)
{
        struct ion_system_heap *sys_heap = container_of(heap,
                                                        struct ion_system_heap,
                                                        heap);
        int i;

        for (i = 0; i < num_orders; i++) {
                struct ion_page_pool *pool = sys_heap->pools[i];

                seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
                           pool->high_count, pool->order,
                           (1 << pool->order) * PAGE_SIZE * pool->high_count);
                seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
                           pool->low_count, pool->order,
                           (1 << pool->order) * PAGE_SIZE * pool->low_count);
        }
        return 0;
}

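/*
 * One page pool per entry in orders[]; only pools backing chunks
 * larger than order 4 get the opportunistic gfp flags, mirroring
 * the choice made in alloc_buffer_page() above.
 */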
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
        struct ion_system_heap *heap;
        int i;

        heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
        if (!heap)
                return ERR_PTR(-ENOMEM);
        heap->heap.ops = &system_heap_ops;
        heap->heap.type = ION_HEAP_TYPE_SYSTEM;
        heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
                              GFP_KERNEL);
        if (!heap->pools)
                goto err_alloc_pools;
        for (i = 0; i < num_orders; i++) {
                struct ion_page_pool *pool;
                gfp_t gfp_flags = low_order_gfp_flags;

                if (orders[i] > 4)
                        gfp_flags = high_order_gfp_flags;
                pool = ion_page_pool_create(gfp_flags, orders[i]);
                if (!pool)
                        goto err_create_pool;
                heap->pools[i] = pool;
        }
        heap->heap.debug_show = ion_system_heap_debug_show;
        return &heap->heap;
err_create_pool:
        for (i = 0; i < num_orders; i++)
                if (heap->pools[i])
                        ion_page_pool_destroy(heap->pools[i]);
        kfree(heap->pools);
err_alloc_pools:
        kfree(heap);
        return ERR_PTR(-ENOMEM);
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
        struct ion_system_heap *sys_heap = container_of(heap,
                                                        struct ion_system_heap,
                                                        heap);
        int i;

        for (i = 0; i < num_orders; i++)
                ion_page_pool_destroy(sys_heap->pools[i]);
        kfree(sys_heap->pools);
        kfree(sys_heap);
}

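/*
 * The "system contig" variant below backs each buffer with a single
 * kzalloc() region, so it is physically contiguous and can report a
 * phys address, at the cost of being limited to roughly kmalloc-sized
 * allocations.
 */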
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
                                           struct ion_buffer *buffer,
                                           unsigned long len,
                                           unsigned long align,
                                           unsigned long flags)
{
        buffer->priv_virt = kzalloc(len, GFP_KERNEL);
        if (!buffer->priv_virt)
                return -ENOMEM;
        return 0;
}

void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
        kfree(buffer->priv_virt);
}

static int ion_system_contig_heap_phys(struct ion_heap *heap,
                                       struct ion_buffer *buffer,
                                       ion_phys_addr_t *addr, size_t *len)
{
        *addr = virt_to_phys(buffer->priv_virt);
        *len = buffer->size;
        return 0;
}

struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
                                                struct ion_buffer *buffer)
{
        struct sg_table *table;
        int ret;

        table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!table)
                return ERR_PTR(-ENOMEM);
        ret = sg_alloc_table(table, 1, GFP_KERNEL);
        if (ret) {
                kfree(table);
                return ERR_PTR(ret);
        }
        sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
                    0);
        return table;
}

void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
                                      struct ion_buffer *buffer)
{
        sg_free_table(buffer->sg_table);
        kfree(buffer->sg_table);
}

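/*
 * Since the backing memory is one physically contiguous block, the
 * whole vma can be mapped with a single remap_pfn_range() call,
 * offset directly by vm_pgoff.
 */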
int ion_system_contig_heap_map_user(struct ion_heap *heap,
                                    struct ion_buffer *buffer,
                                    struct vm_area_struct *vma)
{
        unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));

        return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}

static struct ion_heap_ops kmalloc_ops = {
        .allocate = ion_system_contig_heap_allocate,
        .free = ion_system_contig_heap_free,
        .phys = ion_system_contig_heap_phys,
        .map_dma = ion_system_contig_heap_map_dma,
        .unmap_dma = ion_system_contig_heap_unmap_dma,
        .map_kernel = ion_system_heap_map_kernel,
        .unmap_kernel = ion_system_heap_unmap_kernel,
        .map_user = ion_system_contig_heap_map_user,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
{
        struct ion_heap *heap;

        heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
        if (!heap)
                return ERR_PTR(-ENOMEM);
        heap->ops = &kmalloc_ops;
        heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
        return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
        kfree(heap);
}