/*
 * drivers/gpu/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion_priv.h"

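/*
 * Bookkeeping for one chunk of a non-contiguous allocation.  Rather
 * than kmalloc a tracking structure, the struct is written into the
 * first page of the chunk itself (via kmap in alloc_largest_available)
 * and wiped again once the chunk has been recorded in the sg_table.
 */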
struct page_info {
        struct page *page;
        unsigned long order;
        struct list_head list;
};

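/*
 * Allocate the largest chunk, from the order list below, that still
 * fits in @size.  High orders are tried opportunistically
 * (__GFP_NOWARN | __GFP_NORETRY) so a fragmented system falls back to
 * order-0 pages instead of thrashing reclaim.
 */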
static struct page_info *alloc_largest_available(unsigned long size)
{
        static unsigned int orders[] = {8, 4, 0};
        struct page *page;
        struct page_info *info;
        int i;

        for (i = 0; i < ARRAY_SIZE(orders); i++) {
                if (size < (1 << orders[i]) * PAGE_SIZE)
                        continue;
                page = alloc_pages(GFP_HIGHUSER | __GFP_ZERO |
                                   __GFP_NOWARN | __GFP_NORETRY, orders[i]);
                if (!page)
                        continue;
                split_page(page, orders[i]);

                /* Stash the bookkeeping in the chunk's first page. */
                info = kmap(page);
                info->page = page;
                info->order = orders[i];
                return info;
        }
        return NULL;
}

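/*
 * Back @buffer with a list of variable-order chunks, then flatten the
 * chunks into one scatterlist of order-0 pages.  The sg_table doubles
 * as the heap-private handle (buffer->priv_virt), so map_dma below can
 * simply hand it back.
 */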
static int ion_system_heap_allocate(struct ion_heap *heap,
                                    struct ion_buffer *buffer,
                                    unsigned long size, unsigned long align,
                                    unsigned long flags)
{
        struct sg_table *table;
        struct scatterlist *sg;
        int ret;
        struct list_head pages;
        struct page_info *info, *tmp_info;
        int i;
        long size_remaining = PAGE_ALIGN(size);

        INIT_LIST_HEAD(&pages);
        while (size_remaining > 0) {
                info = alloc_largest_available(size_remaining);
                if (!info)
                        goto err;
                list_add_tail(&info->list, &pages);
                size_remaining -= (1 << info->order) * PAGE_SIZE;
        }

        table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!table)
                goto err;

        ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE, GFP_KERNEL);
        if (ret)
                goto err1;

        sg = table->sgl;
        list_for_each_entry_safe(info, tmp_info, &pages, list) {
                struct page *page = info->page;
                for (i = 0; i < (1 << info->order); i++) {
                        sg_set_page(sg, page + i, PAGE_SIZE, 0);
                        sg = sg_next(sg);
                }
                list_del(&info->list);
                /* The bookkeeping lived in the page itself; wipe it
                 * before the page is handed over. */
                memset(info, 0, sizeof(struct page_info));
                kunmap(page);
        }

        /* Flush the freshly zeroed pages out of the CPU caches. */
        dma_sync_sg_for_device(NULL, table->sgl, table->nents,
                               DMA_BIDIRECTIONAL);

        buffer->priv_virt = table;
        return 0;
err1:
        kfree(table);
err:
        list_for_each_entry_safe(info, tmp_info, &pages, list) {
                struct page *page = info->page;
                unsigned long order = info->order;

                /* info points into the chunk's first page, so copy the
                 * fields out and kunmap before freeing the pages. */
                list_del(&info->list);
                kunmap(page);
                for (i = 0; i < (1 << order); i++)
                        __free_page(page + i);
        }
        return -ENOMEM;
}

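/*
 * buffer->priv_virt and buffer->sg_table alias the same sg_table here
 * (see ion_system_heap_map_dma), so freeing via sg_table releases both.
 */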
void ion_system_heap_free(struct ion_buffer *buffer)
{
        int i;
        struct scatterlist *sg;
        struct sg_table *table = buffer->priv_virt;

        for_each_sg(table->sgl, sg, table->nents, i)
                __free_pages(sg_page(sg), get_order(sg_dma_len(sg)));
        if (buffer->sg_table)
                sg_free_table(buffer->sg_table);
        kfree(buffer->sg_table);
}

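/*
 * The scatterlist was already built at allocation time, so mapping for
 * DMA is a plain lookup and unmapping has nothing to undo.
 */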
struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
                                         struct ion_buffer *buffer)
{
        return buffer->priv_virt;
}

void ion_system_heap_unmap_dma(struct ion_heap *heap,
                               struct ion_buffer *buffer)
{
}

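/*
 * Build a flat array of the buffer's order-0 pages and vmap it.
 * Uncached buffers get a writecombined mapping to match how userspace
 * will see the memory.
 */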
void *ion_system_heap_map_kernel(struct ion_heap *heap,
                                 struct ion_buffer *buffer)
{
        struct scatterlist *sg;
        int i, j;
        void *vaddr;
        pgprot_t pgprot;
        struct sg_table *table = buffer->priv_virt;
        int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
        struct page **pages = kzalloc(sizeof(struct page *) * npages,
                                      GFP_KERNEL);
        struct page **tmp = pages;

        if (!pages)
                return NULL;

        if (buffer->flags & ION_FLAG_CACHED)
                pgprot = PAGE_KERNEL;
        else
                pgprot = pgprot_writecombine(PAGE_KERNEL);

        for_each_sg(table->sgl, sg, table->nents, i) {
                int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
                struct page *page = sg_page(sg);
                BUG_ON(i >= npages);
                for (j = 0; j < npages_this_entry; j++)
                        *(tmp++) = page++;
        }
        vaddr = vmap(pages, npages, VM_MAP, pgprot);
        kfree(pages);

        return vaddr;
}

void ion_system_heap_unmap_kernel(struct ion_heap *heap,
                                  struct ion_buffer *buffer)
{
        vunmap(buffer->vaddr);
}

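/*
 * Map the buffer into userspace one scatterlist entry at a time.
 * vm_pgoff is interpreted as a number of entries to skip; every entry
 * in this heap is a single page, so the two line up.
 */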
int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
                             struct vm_area_struct *vma)
{
        struct sg_table *table = buffer->priv_virt;
        unsigned long addr = vma->vm_start;
        unsigned long offset = vma->vm_pgoff;
        struct scatterlist *sg;
        int i, ret;

        for_each_sg(table->sgl, sg, table->nents, i) {
                /* Skip whole entries until the mmap offset is consumed. */
                if (offset) {
                        offset--;
                        continue;
                }
                ret = remap_pfn_range(vma, addr, page_to_pfn(sg_page(sg)),
                                      sg_dma_len(sg), vma->vm_page_prot);
                if (ret)
                        return ret;
                addr += sg_dma_len(sg);
                if (addr >= vma->vm_end)
                        return 0;
        }
        return 0;
}

static struct ion_heap_ops vmalloc_ops = {
        .allocate = ion_system_heap_allocate,
        .free = ion_system_heap_free,
        .map_dma = ion_system_heap_map_dma,
        .unmap_dma = ion_system_heap_unmap_dma,
        .map_kernel = ion_system_heap_map_kernel,
        .unmap_kernel = ion_system_heap_unmap_kernel,
        .map_user = ion_system_heap_map_user,
};

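/*
 * A minimal sketch of how a platform driver might instantiate this
 * heap (assuming the usual ion core helpers; the exact registration
 * path is platform-specific):
 *
 *      struct ion_heap *heap = ion_system_heap_create(NULL);
 *      if (!IS_ERR(heap)) {
 *              heap->name = "system";
 *              heap->id = ION_HEAP_TYPE_SYSTEM;
 *              ion_device_add_heap(idev, heap);
 *      }
 */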
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
        struct ion_heap *heap;

        heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
        if (!heap)
                return ERR_PTR(-ENOMEM);
        heap->ops = &vmalloc_ops;
        heap->type = ION_HEAP_TYPE_SYSTEM;
        return heap;
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
        kfree(heap);
}

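/*
 * The "contig" variant below backs each buffer with a single
 * physically contiguous kzalloc allocation, which is what lets it
 * implement the phys() op.
 */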
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
                                           struct ion_buffer *buffer,
                                           unsigned long len,
                                           unsigned long align,
                                           unsigned long flags)
{
        buffer->priv_virt = kzalloc(len, GFP_KERNEL);
        if (!buffer->priv_virt)
                return -ENOMEM;
        return 0;
}

void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
        kfree(buffer->priv_virt);
}

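/*
 * kzalloc memory is linearly mapped, so the physical address falls
 * straight out of virt_to_phys.
 */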
static int ion_system_contig_heap_phys(struct ion_heap *heap,
                                       struct ion_buffer *buffer,
                                       ion_phys_addr_t *addr, size_t *len)
{
        *addr = virt_to_phys(buffer->priv_virt);
        *len = buffer->size;
        return 0;
}

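/*
 * One contiguous allocation maps to a single-entry scatterlist, built
 * fresh on each map_dma call and torn down again in unmap_dma.
 */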
struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
                                                struct ion_buffer *buffer)
{
        struct sg_table *table;
        int ret;

        table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!table)
                return ERR_PTR(-ENOMEM);
        ret = sg_alloc_table(table, 1, GFP_KERNEL);
        if (ret) {
                kfree(table);
                return ERR_PTR(ret);
        }
        sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
                    0);
        return table;
}

void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
                                      struct ion_buffer *buffer)
{
        sg_free_table(buffer->sg_table);
        kfree(buffer->sg_table);
}

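/*
 * With contiguous backing, the whole VMA can be remapped in one
 * remap_pfn_range call; vm_pgoff is honoured as a page offset into
 * the buffer.
 */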
int ion_system_contig_heap_map_user(struct ion_heap *heap,
                                    struct ion_buffer *buffer,
                                    struct vm_area_struct *vma)
{
        unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));

        return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}

static struct ion_heap_ops kmalloc_ops = {
        .allocate = ion_system_contig_heap_allocate,
        .free = ion_system_contig_heap_free,
        .phys = ion_system_contig_heap_phys,
        .map_dma = ion_system_contig_heap_map_dma,
        .unmap_dma = ion_system_contig_heap_unmap_dma,
        .map_kernel = ion_system_heap_map_kernel,
        .unmap_kernel = ion_system_heap_unmap_kernel,
        .map_user = ion_system_contig_heap_map_user,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
{
        struct ion_heap *heap;

        heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
        if (!heap)
                return ERR_PTR(-ENOMEM);
        heap->ops = &kmalloc_ops;
        heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
        return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
        kfree(heap);
}