/*
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"

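/*
 * GFP flags for the two allocation tiers.  High-order attempts are
 * opportunistic, so they fail fast rather than stall the system: no
 * retries, no failure warnings, no kswapd wakeup, and (by masking out
 * __GFP_WAIT) no direct reclaim.  If a large chunk cannot be had cheaply,
 * the allocator below simply drops to a smaller order.
 */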
static unsigned int high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO |
					    __GFP_NOWARN | __GFP_NORETRY |
					    __GFP_NO_KSWAPD) & ~__GFP_WAIT;
static unsigned int low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO |
					    __GFP_NOWARN);
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);

static int order_to_index(unsigned int order)
{
	int i;

	for (i = 0; i < num_orders; i++)
		if (order == orders[i])
			return i;
	BUG();
	return -1;
}

static unsigned int order_to_size(int order)
{
	return PAGE_SIZE << order;
}

struct ion_system_heap {
	struct ion_heap heap;
	struct ion_page_pool **pools;
};

struct page_info {
	struct page *page;
	unsigned int order;
	struct list_head list;
};

static struct page *alloc_buffer_page(struct ion_system_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long order)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	struct ion_page_pool *pool = heap->pools[order_to_index(order)];
	struct page *page;

	if (!cached) {
		page = ion_page_pool_alloc(pool);
	} else {
		gfp_t gfp_flags = low_order_gfp_flags;

		if (order > 4)
			gfp_flags = high_order_gfp_flags;
		page = alloc_pages(gfp_flags, order);
		if (page)
			__dma_page_cpu_to_dev(page, 0, PAGE_SIZE << order,
					      DMA_BIDIRECTIONAL);
	}
	if (!page)
		return NULL;
	if (split_pages)
		split_page(page, order);
	return page;
}

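/*
 * Freeing mirrors allocation: uncached pages are zeroed and recycled into
 * the matching pool, pages that were split for faulting user mappings are
 * released individually, and anything else is freed as one high-order block.
 */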
static void free_buffer_page(struct ion_system_heap *heap,
			     struct ion_buffer *buffer, struct page *page,
			     unsigned int order)
{
	bool cached = ion_buffer_cached(buffer);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	int i;

	if (!cached) {
		struct ion_page_pool *pool = heap->pools[order_to_index(order)];
		/* Zero the pages before returning them to the pool for
		   security.  This uses vmap as we want to set the pgprot so
		   the writes occur to noncached mappings, as the pool's
		   purpose is to keep the pages out of the cache. */
		for (i = 0; i < (1 << order); i++) {
			struct page *sub_page = page + i;
			void *addr = vmap(&sub_page, 1, VM_MAP,
					  pgprot_writecombine(PAGE_KERNEL));
			memset(addr, 0, PAGE_SIZE);
			vunmap(addr);
		}
		ion_page_pool_free(pool, page);
	} else if (split_pages) {
		for (i = 0; i < (1 << order); i++)
			__free_page(page + i);
	} else {
		__free_pages(page, order);
	}
}

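/*
 * Greedy chunk selection: take the largest order that still fits in both
 * the remaining size and max_order.  Assuming 4K pages, a 1,122,304-byte
 * (1M + 64K + 8K) request would typically come back as one order-8 chunk
 * (1M), one order-4 chunk (64K) and two order-0 pages: four chunks instead
 * of 274 single pages.
 */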
static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
						 struct ion_buffer *buffer,
						 unsigned long size,
						 unsigned int max_order)
{
	struct page *page;
	struct page_info *info;
	int i;

	for (i = 0; i < num_orders; i++) {
		if (size < order_to_size(orders[i]))
			continue;
		if (max_order < orders[i])
			continue;
		page = alloc_buffer_page(heap, buffer, orders[i]);
		if (!page)
			continue;
		info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
		info->page = page;
		info->order = orders[i];
		return info;
	}
	return NULL;
}

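/*
 * Allocation is two-phase: gather chunks largest-first, then wire them into
 * an sg_table.  max_order only ever ratchets downward, so an order that has
 * already failed is never attempted again for this buffer.  Buffers that
 * fault in their user mappings get one sg entry per page; everything else
 * gets one entry per chunk.
 */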
static int ion_system_heap_allocate(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    unsigned long size, unsigned long align,
				    unsigned long flags)
{
	struct ion_system_heap *sys_heap =
		container_of(heap, struct ion_system_heap, heap);
	struct sg_table *table;
	struct scatterlist *sg;
	struct list_head pages;
	struct page_info *info, *tmp_info;
	int ret, i = 0;
	long size_remaining = PAGE_ALIGN(size);
	unsigned int max_order = orders[0];
	bool split_pages = ion_buffer_fault_user_mappings(buffer);

	INIT_LIST_HEAD(&pages);
	while (size_remaining > 0) {
		info = alloc_largest_available(sys_heap, buffer,
					       size_remaining, max_order);
		if (!info)
			goto err;
		list_add_tail(&info->list, &pages);
		size_remaining -= (1 << info->order) * PAGE_SIZE;
		max_order = info->order;
		i++;
	}

	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		goto err;
	if (split_pages)
		ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
				     GFP_KERNEL);
	else
		ret = sg_alloc_table(table, i, GFP_KERNEL);
	if (ret)
		goto err1;

	sg = table->sgl;
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		struct page *page = info->page;

		if (split_pages) {
			for (i = 0; i < (1 << info->order); i++) {
				sg_set_page(sg, page + i, PAGE_SIZE, 0);
				sg = sg_next(sg);
			}
		} else {
			sg_set_page(sg, page,
				    (1 << info->order) * PAGE_SIZE, 0);
			sg = sg_next(sg);
		}
		list_del(&info->list);
		kfree(info);
	}
	buffer->priv_virt = table;
	return 0;
err1:
	kfree(table);
err:
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		free_buffer_page(sys_heap, buffer, info->page, info->order);
		kfree(info);
	}
	return -ENOMEM;
}

void ion_system_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_system_heap *sys_heap =
		container_of(heap, struct ion_system_heap, heap);
	struct sg_table *table = buffer->priv_virt;
	struct scatterlist *sg;
	int i;

	for_each_sg(table->sgl, sg, table->nents, i)
		free_buffer_page(sys_heap, buffer, sg_page(sg),
				 get_order(sg_dma_len(sg)));
	sg_free_table(table);
	kfree(table);
}

struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

void ion_system_heap_unmap_dma(struct ion_heap *heap,
			       struct ion_buffer *buffer)
{
}

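/*
 * Kernel mapping: flatten the scatterlist into an array of struct page
 * pointers, then vmap() the whole buffer in one call, cacheable for
 * ION_FLAG_CACHED buffers and write-combined otherwise.
 */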
void *ion_system_heap_map_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	struct scatterlist *sg;
	int i, j;
	void *vaddr;
	pgprot_t pgprot;
	struct sg_table *table = buffer->priv_virt;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct page **pages = vmalloc(sizeof(struct page *) * npages);
	struct page **tmp = pages;

	if (!pages)
		return NULL;
	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
		struct page *page = sg_page(sg);

		BUG_ON(i >= npages);
		for (j = 0; j < npages_this_entry; j++)
			*(tmp++) = page++;
	}
	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	vfree(pages);
	return vaddr;
}

void ion_system_heap_unmap_kernel(struct ion_heap *heap,
				  struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}

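/*
 * Userspace mapping: consume vma->vm_pgoff by skipping whole sg entries
 * (and the head of a partial one), then remap_pfn_range() chunk by chunk
 * until the vma is filled.
 */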
int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			     struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->priv_virt;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
	struct scatterlist *sg;
	int i;

	for_each_sg(table->sgl, sg, table->nents, i) {
		struct page *page = sg_page(sg);
		unsigned long remainder = vma->vm_end - addr;
		unsigned long len = sg_dma_len(sg);

		if (offset >= sg_dma_len(sg)) {
			offset -= sg_dma_len(sg);
			continue;
		} else if (offset) {
			page += offset / PAGE_SIZE;
			len = sg_dma_len(sg) - offset;
			offset = 0;
		}
		len = min(len, remainder);
		remap_pfn_range(vma, addr, page_to_pfn(page), len,
				vma->vm_page_prot);
		addr += len;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

static struct ion_heap_ops system_heap_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_dma = ion_system_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_system_heap_map_kernel,
	.unmap_kernel = ion_system_heap_unmap_kernel,
	.map_user = ion_system_heap_map_user,
};

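/*
 * Pool statistics for debugfs.  Each pool prints two lines; with, say,
 * three order-8 pages on the highmem list, the first line would read
 * (values illustrative):
 *
 *	3 order 8 highmem pages in pool = 3145728 total
 */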
static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
				      void *unused)
{
	struct ion_system_heap *sys_heap =
		container_of(heap, struct ion_system_heap, heap);
	int i;

	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->pools[i];

		seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
			   pool->high_count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->high_count);
		seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
			   pool->low_count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->low_count);
	}
	return 0;
}

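/*
 * Illustrative usage, not part of this file: a platform ION driver of this
 * era would normally reach this constructor via ion_heap_create() and then
 * register the result with the ion device, roughly (idev being the
 * driver's struct ion_device):
 *
 *	struct ion_heap *heap = ion_system_heap_create(NULL);
 *	if (!IS_ERR(heap))
 *		ion_device_add_heap(idev, heap);
 */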
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
	struct ion_system_heap *heap;
	int i;

	heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->heap.ops = &system_heap_ops;
	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
	heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
			      GFP_KERNEL);
	if (!heap->pools)
		goto err_alloc_pools;
	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool;
		gfp_t gfp_flags = low_order_gfp_flags;

		if (orders[i] > 4)
			gfp_flags = high_order_gfp_flags;
		pool = ion_page_pool_create(gfp_flags, orders[i]);
		if (!pool)
			goto err_create_pool;
		heap->pools[i] = pool;
	}
	heap->heap.debug_show = ion_system_heap_debug_show;
	return &heap->heap;
err_create_pool:
	for (i = 0; i < num_orders; i++)
		if (heap->pools[i])
			ion_page_pool_destroy(heap->pools[i]);
	kfree(heap->pools);
err_alloc_pools:
	kfree(heap);
	return ERR_PTR(-ENOMEM);
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
	struct ion_system_heap *sys_heap =
		container_of(heap, struct ion_system_heap, heap);
	int i;

	for (i = 0; i < num_orders; i++)
		ion_page_pool_destroy(sys_heap->pools[i]);
	kfree(sys_heap->pools);
	kfree(sys_heap);
}

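/*
 * Second heap type in this file: ION_HEAP_TYPE_SYSTEM_CONTIG swaps the
 * scatter-gather machinery above for a single kzalloc(), giving physically
 * contiguous buffers (hence the ->phys op) at the cost of kmalloc's size
 * limits.
 */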
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long align,
					   unsigned long flags)
{
	buffer->priv_virt = kzalloc(len, GFP_KERNEL);
	if (!buffer->priv_virt)
		return -ENOMEM;
	return 0;
}

void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	kfree(buffer->priv_virt);
}

static int ion_system_contig_heap_phys(struct ion_heap *heap,
				       struct ion_buffer *buffer,
				       ion_phys_addr_t *addr, size_t *len)
{
	*addr = virt_to_phys(buffer->priv_virt);
	*len = buffer->size;
	return 0;
}

struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
						struct ion_buffer *buffer)
{
	struct sg_table *table;
	int ret;

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ERR_PTR(ret);
	}
	sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
		    0);
	return table;
}

void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
				      struct ion_buffer *buffer)
{
	sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
}

int ion_system_contig_heap_map_user(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    struct vm_area_struct *vma)
{
	unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));

	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.phys = ion_system_contig_heap_phys,
	.map_dma = ion_system_contig_heap_map_dma,
	.unmap_dma = ion_system_contig_heap_unmap_dma,
	.map_kernel = ion_system_heap_map_kernel,
	.unmap_kernel = ion_system_heap_unmap_kernel,
	.map_user = ion_system_contig_heap_map_user,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}