/*
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"
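
/*
 * Allocations are attempted at each of the orders below, largest first.
 * High-order attempts must not stall in reclaim or spam the log, so they
 * drop __GFP_WAIT and add __GFP_NORETRY; order-0 allocations may block
 * as usual.
 */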
static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
				     __GFP_NORETRY) & ~__GFP_WAIT;
static gfp_t low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN);
static const unsigned int orders[] = {8, 4, 0};
static const int num_orders = ARRAY_SIZE(orders);
static int order_to_index(unsigned int order)
{
	int i;

	for (i = 0; i < num_orders; i++)
		if (order == orders[i])
			return i;
	BUG();
	return -1;
}

static unsigned int order_to_size(int order)
{
	return PAGE_SIZE << order;
}
struct ion_system_heap {
	struct ion_heap heap;
	struct ion_page_pool **pools;
};

struct page_info {
	struct page *page;
	unsigned int order;
	struct list_head list;
};
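
/*
 * Uncached buffers are served from the per-order page pools; cached
 * buffers bypass the pools and are allocated directly from the page
 * allocator, then synced so the device sees the zeroed contents.
 */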
static struct page *alloc_buffer_page(struct ion_system_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long order)
{
	bool cached = ion_buffer_cached(buffer);
	struct ion_page_pool *pool = heap->pools[order_to_index(order)];
	struct page *page;

	if (!cached) {
		page = ion_page_pool_alloc(pool);
	} else {
		gfp_t gfp_flags = low_order_gfp_flags;

		if (order > 4)
			gfp_flags = high_order_gfp_flags;
		page = alloc_pages(gfp_flags, order);
		if (!page)
			return NULL;
		ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
					  DMA_BIDIRECTIONAL);
	}
	return page;
}
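
/*
 * Uncached pages go back to the matching pool for reuse; cached pages
 * are returned straight to the page allocator.
 */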
static void free_buffer_page(struct ion_system_heap *heap,
			     struct ion_buffer *buffer, struct page *page,
			     unsigned int order)
{
	bool cached = ion_buffer_cached(buffer);

	if (!cached) {
		struct ion_page_pool *pool = heap->pools[order_to_index(order)];

		ion_page_pool_free(pool, page);
	} else {
		__free_pages(page, order);
	}
}
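
/*
 * Walk the order list from largest to smallest and return the first
 * chunk that both fits in the remaining size and can actually be
 * allocated, so buffers are built from as few chunks as possible.
 */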
static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
						 struct ion_buffer *buffer,
						 unsigned long size,
						 unsigned int max_order)
{
	struct page *page;
	struct page_info *info;
	int i;

	for (i = 0; i < num_orders; i++) {
		if (size < order_to_size(orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_buffer_page(heap, buffer, orders[i]);
		if (!page)
			continue;

		info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
		if (!info) {
			free_buffer_page(heap, buffer, page, orders[i]);
			return NULL;
		}
		info->page = page;
		info->order = orders[i];
		return info;
	}
	return NULL;
}
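
/*
 * Build the buffer as a list of variable-order chunks, then flatten the
 * list into an sg_table.  max_order only ever decreases, so once a given
 * order has failed it is never retried for this buffer.
 */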
static int ion_system_heap_allocate(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    unsigned long size, unsigned long align,
				    unsigned long flags)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table;
	struct scatterlist *sg;
	int ret;
	struct list_head pages;
	struct page_info *info, *tmp_info;
	int i = 0;
	long size_remaining = PAGE_ALIGN(size);
	unsigned int max_order = orders[0];

	if (align > PAGE_SIZE)
		return -EINVAL;

	INIT_LIST_HEAD(&pages);
	while (size_remaining > 0) {
		info = alloc_largest_available(sys_heap, buffer,
					       size_remaining, max_order);
		if (!info)
			goto err;
		list_add_tail(&info->list, &pages);
		size_remaining -= (1 << info->order) * PAGE_SIZE;
		max_order = info->order;
		i++;
	}

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table)
		goto err;

	ret = sg_alloc_table(table, i, GFP_KERNEL);
	if (ret)
		goto err1;

	sg = table->sgl;
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		struct page *page = info->page;

		sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE, 0);
		sg = sg_next(sg);
		list_del(&info->list);
		kfree(info);
	}

	buffer->priv_virt = table;
	return 0;

err1:
	kfree(table);
err:
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		free_buffer_page(sys_heap, buffer, info->page, info->order);
		kfree(info);
	}
	return -ENOMEM;
}
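
/*
 * The heap sets ION_HEAP_FLAG_DEFER_FREE, so this runs from the ION
 * core's deferred-free thread rather than in the caller's context.
 */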
static void ion_system_heap_free(struct ion_buffer *buffer)
{
	struct ion_heap *heap = buffer->heap;
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table = buffer->sg_table;
	bool cached = ion_buffer_cached(buffer);
	struct scatterlist *sg;
	int i;

	/* uncached pages come from the page pools, zero them before returning
	   for security purposes (other allocations are zeroed at alloc time) */
	if (!cached)
		ion_heap_buffer_zero(buffer);

	for_each_sg(table->sgl, sg, table->nents, i)
		free_buffer_page(sys_heap, buffer, sg_page(sg),
				 get_order(sg->length));
	sg_free_table(table);
	kfree(table);
}
static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
						struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

static void ion_system_heap_unmap_dma(struct ion_heap *heap,
				      struct ion_buffer *buffer)
{
}
static struct ion_heap_ops system_heap_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_dma = ion_system_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_heap_map_user,
};
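
/*
 * Old-style shrinker callback: when nr_to_scan is zero only a count is
 * requested.  Otherwise drain the deferred-free list first (cheapest,
 * no zeroing needed) and fall back to the page pools.
 */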
static int ion_system_heap_shrink(struct shrinker *shrinker,
				  struct shrink_control *sc)
{
	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
					     shrinker);
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int nr_total = 0;
	int nr_freed = 0;
	int i;

	if (sc->nr_to_scan == 0)
		goto end;

	/* shrink the free list first, no point in zeroing the memory if
	   we're just going to reclaim it */
	nr_freed += ion_heap_freelist_drain(heap, sc->nr_to_scan * PAGE_SIZE) /
		PAGE_SIZE;

	if (nr_freed >= sc->nr_to_scan)
		goto end;

	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->pools[i];

		nr_freed += ion_page_pool_shrink(pool, sc->gfp_mask,
						 sc->nr_to_scan);
		if (nr_freed >= sc->nr_to_scan)
			break;
	}

end:
	/* total number of items is whatever the page pools are holding
	   plus whatever's in the freelist */
	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->pools[i];

		nr_total += ion_page_pool_shrink(pool, sc->gfp_mask, 0);
	}
	nr_total += ion_heap_freelist_size(heap) / PAGE_SIZE;
	return nr_total;
}
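
/* Dump per-order pool occupancy to debugfs. */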
static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
				      void *unused)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool = sys_heap->pools[i];

		seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
			   pool->high_count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->high_count);
		seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
			   pool->low_count, pool->order,
			   (1 << pool->order) * PAGE_SIZE * pool->low_count);
	}
	return 0;
}
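
/*
 * One page pool is created per order.  Frees are deferred via
 * ION_HEAP_FLAG_DEFER_FREE, and both the pools and the deferred-free
 * list are reclaimable through the shrinker registered below.
 */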
struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
	struct ion_system_heap *heap;
	int i;

	heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->heap.ops = &system_heap_ops;
	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
	heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
	heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
			      GFP_KERNEL);
	if (!heap->pools)
		goto err_alloc_pools;
	for (i = 0; i < num_orders; i++) {
		struct ion_page_pool *pool;
		gfp_t gfp_flags = low_order_gfp_flags;

		if (orders[i] > 4)
			gfp_flags = high_order_gfp_flags;
		pool = ion_page_pool_create(gfp_flags, orders[i]);
		if (!pool)
			goto err_create_pool;
		heap->pools[i] = pool;
	}

	heap->heap.shrinker.shrink = ion_system_heap_shrink;
	heap->heap.shrinker.seeks = DEFAULT_SEEKS;
	heap->heap.shrinker.batch = 0;
	register_shrinker(&heap->heap.shrinker);
	heap->heap.debug_show = ion_system_heap_debug_show;
	return &heap->heap;

err_create_pool:
	for (i = 0; i < num_orders; i++)
		if (heap->pools[i])
			ion_page_pool_destroy(heap->pools[i]);
	kfree(heap->pools);
err_alloc_pools:
	kfree(heap);
	return ERR_PTR(-ENOMEM);
}
void ion_system_heap_destroy(struct ion_heap *heap)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	int i;

	/* balance the register_shrinker() in ion_system_heap_create() */
	unregister_shrinker(&heap->shrinker);
	for (i = 0; i < num_orders; i++)
		ion_page_pool_destroy(sys_heap->pools[i]);
	kfree(sys_heap->pools);
	kfree(sys_heap);
}
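
/*
 * The "system contig" heap hands out one physically contiguous run per
 * buffer.  alloc_pages() rounds the request up to a power of two, so
 * the allocation is split and the pages beyond the page-aligned length
 * are given back immediately.
 */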
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long align,
					   unsigned long flags)
{
	int order = get_order(len);
	struct page *page;
	struct sg_table *table;
	unsigned long i;
	int ret;

	if (align > (PAGE_SIZE << order))
		return -EINVAL;

	page = alloc_pages(low_order_gfp_flags, order);
	if (!page)
		return -ENOMEM;

	split_page(page, order);

	len = PAGE_ALIGN(len);
	for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
		__free_page(page + i);

	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto free_pages;
	}

	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret)
		goto free_table;

	sg_set_page(table->sgl, page, len, 0);

	buffer->priv_virt = table;

	ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);

	return 0;

free_table:
	kfree(table);
free_pages:
	for (i = 0; i < len >> PAGE_SHIFT; i++)
		__free_page(page + i);
	return ret;
}
static void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	struct sg_table *table = buffer->priv_virt;
	struct page *page = sg_page(table->sgl);
	unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
	unsigned long i;

	for (i = 0; i < pages; i++)
		__free_page(page + i);
	sg_free_table(table);
	kfree(table);
}
static int ion_system_contig_heap_phys(struct ion_heap *heap,
				       struct ion_buffer *buffer,
				       ion_phys_addr_t *addr, size_t *len)
{
	struct sg_table *table = buffer->priv_virt;
	struct page *page = sg_page(table->sgl);

	*addr = page_to_phys(page);
	*len = buffer->size;
	return 0;
}
static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
						       struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
					     struct ion_buffer *buffer)
{
}
static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.phys = ion_system_contig_heap_phys,
	.map_dma = ion_system_contig_heap_map_dma,
	.unmap_dma = ion_system_contig_heap_unmap_dma,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_heap_map_user,
};
struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	return heap;
}
void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}