/*
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"
#include "ion_priv.h"
/*
 * Bookkeeping for one chunk of pages returned by
 * alloc_largest_available(): the first page, the allocation order, and
 * a list node so chunks can be chained up during allocation.
 */
struct page_info {
	struct page *page;
	unsigned int order;
	struct list_head list;
};

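/*
 * Allocation orders to try, largest first: order 8 (1MB with 4K pages),
 * then order 4 (64K), then single pages.  Fewer, larger chunks mean a
 * smaller scatterlist and cheaper map/unmap operations.
 */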
static unsigned int orders[] = {8, 4, 0};

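/*
 * Grab the largest chunk from orders[] that still fits in @size and is
 * no bigger than @max_order.  __GFP_NORETRY | __GFP_NOWARN keep failed
 * high-order attempts cheap and quiet so we can fall back to smaller
 * orders.
 */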
static struct page_info *alloc_largest_available(unsigned long size,
						 bool split_pages,
						 unsigned int max_order)
{
	struct page *page;
	struct page_info *info;
	int i;

	for (i = 0; i < ARRAY_SIZE(orders); i++) {
		if (size < (1 << orders[i]) * PAGE_SIZE)
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_pages(GFP_HIGHUSER | __GFP_ZERO |
				   __GFP_NOWARN | __GFP_NORETRY, orders[i]);
		if (!page)
			continue;

		/* Split the chunk so the buffer can be faulted into
		 * userspace one page at a time. */
		if (split_pages)
			split_page(page, orders[i]);

		info = kmalloc(sizeof(*info), GFP_KERNEL);
		if (!info) {
			int j;

			if (split_pages)
				for (j = 0; j < (1 << orders[i]); j++)
					__free_page(page + j);
			else
				__free_pages(page, orders[i]);
			return NULL;
		}
		info->page = page;
		info->order = orders[i];
		return info;
	}

	return NULL;
}

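/*
 * Build the buffer out of the largest chunks the allocator will give
 * us.  max_order shrinks to the order of the last successful chunk, so
 * an order that has failed once is never attempted again for this
 * buffer.
 */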
static int ion_system_heap_allocate(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    unsigned long size, unsigned long align,
				    unsigned long flags)
{
	struct sg_table *table;
	struct scatterlist *sg;
	int ret;
	struct list_head pages;
	struct page_info *info, *tmp_info;
	int i = 0;
	long size_remaining = PAGE_ALIGN(size);
	bool split_pages = ion_buffer_fault_user_mappings(buffer);
	unsigned int max_order = orders[0];

	INIT_LIST_HEAD(&pages);
	while (size_remaining > 0) {
		info = alloc_largest_available(size_remaining, split_pages,
					       max_order);
		if (!info)
			goto err;
		list_add_tail(&info->list, &pages);
		size_remaining -= (1 << info->order) * PAGE_SIZE;
		max_order = info->order;
		i++;
	}

	table = kmalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		goto err;

	/* One sg entry per page when mappings are faulted in page by
	 * page, otherwise one entry per chunk. */
	if (split_pages)
		ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE,
				     GFP_KERNEL);
	else
		ret = sg_alloc_table(table, i, GFP_KERNEL);
	if (ret)
		goto err1;

	sg = table->sgl;
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		struct page *page = info->page;

		if (split_pages) {
			for (i = 0; i < (1 << info->order); i++) {
				sg_set_page(sg, page + i, PAGE_SIZE, 0);
				sg = sg_next(sg);
			}
		} else {
			sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE,
				    0);
			sg = sg_next(sg);
		}
		list_del(&info->list);
		kfree(info);
	}

	/* Make sure the zeroed pages are visible to any device. */
	dma_sync_sg_for_device(NULL, table->sgl, table->nents,
			       DMA_BIDIRECTIONAL);

	buffer->priv_virt = table;
	return 0;
err1:
	kfree(table);
err:
	list_for_each_entry_safe(info, tmp_info, &pages, list) {
		if (split_pages)
			for (i = 0; i < (1 << info->order); i++)
				__free_page(info->page + i);
		else
			__free_pages(info->page, info->order);
		list_del(&info->list);
		kfree(info);
	}
	return -ENOMEM;
}

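/*
 * Free the pages referenced by the table built at allocation time; the
 * same table is returned from map_dma() and cached by the core as
 * buffer->sg_table, which is why it is freed through that pointer here.
 */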
void ion_system_heap_free(struct ion_buffer *buffer)
{
	int i;
	struct scatterlist *sg;
	struct sg_table *table = buffer->priv_virt;

	for_each_sg(table->sgl, sg, table->nents, i)
		__free_pages(sg_page(sg), get_order(sg->length));
	if (buffer->sg_table)
		sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
}

struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
					 struct ion_buffer *buffer)
{
	return buffer->priv_virt;
}

void ion_system_heap_unmap_dma(struct ion_heap *heap,
			       struct ion_buffer *buffer)
{
	/* Nothing to do: the table lives until ion_system_heap_free(). */
}

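/*
 * Map the whole buffer into the kernel: flatten the scatterlist into an
 * array of struct page pointers and vmap() it as one contiguous virtual
 * range, write-combined unless the buffer was allocated cached.
 */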
void *ion_system_heap_map_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
	struct scatterlist *sg;
	int i, j;
	void *vaddr;
	pgprot_t pgprot;
	struct sg_table *table = buffer->priv_virt;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	struct page **pages = kzalloc(sizeof(struct page *) * npages,
				      GFP_KERNEL);
	struct page **tmp = pages;

	if (!pages)
		return ERR_PTR(-ENOMEM);

	if (buffer->flags & ION_FLAG_CACHED)
		pgprot = PAGE_KERNEL;
	else
		pgprot = pgprot_writecombine(PAGE_KERNEL);

	for_each_sg(table->sgl, sg, table->nents, i) {
		int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
		struct page *page = sg_page(sg);

		for (j = 0; j < npages_this_entry; j++)
			*(tmp++) = page++;
	}
	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	kfree(pages);

	return vaddr;
}

void ion_system_heap_unmap_kernel(struct ion_heap *heap,
				  struct ion_buffer *buffer)
{
	vunmap(buffer->vaddr);
}

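/*
 * Map the buffer into userspace chunk by chunk with remap_pfn_range().
 * vma->vm_pgoff is consumed as a count of scatterlist entries to skip,
 * which matches a page offset only while every entry is a single page
 * (the faulted, split_pages case).
 */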
int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			     struct vm_area_struct *vma)
{
	struct sg_table *table = buffer->priv_virt;
	unsigned long addr = vma->vm_start;
	unsigned long offset = vma->vm_pgoff;
	struct scatterlist *sg;
	int i;

	for_each_sg(table->sgl, sg, table->nents, i) {
		int ret;

		if (offset) {
			offset--;
			continue;
		}
		ret = remap_pfn_range(vma, addr, page_to_pfn(sg_page(sg)),
				      sg->length, vma->vm_page_prot);
		if (ret)
			return ret;
		addr += sg->length;
		if (addr >= vma->vm_end)
			return 0;
	}
	return 0;
}

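/* Ops for the page-based system heap.  The "vmalloc" name is historical:
 * early versions of this heap handed out vmalloc memory. */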
static struct ion_heap_ops vmalloc_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_dma = ion_system_heap_map_dma,
	.unmap_dma = ion_system_heap_unmap_dma,
	.map_kernel = ion_system_heap_map_kernel,
	.unmap_kernel = ion_system_heap_unmap_kernel,
	.map_user = ion_system_heap_map_user,
};

struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &vmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM;
	return heap;
}

void ion_system_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}

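/*
 * The "system contig" heap below is the kmalloc-backed variant: buffers
 * are physically contiguous, so it can also implement the phys op.
 */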
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long align,
					   unsigned long flags)
{
	buffer->priv_virt = kzalloc(len, GFP_KERNEL);
	if (!buffer->priv_virt)
		return -ENOMEM;
	return 0;
}

void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	kfree(buffer->priv_virt);
}

static int ion_system_contig_heap_phys(struct ion_heap *heap,
				       struct ion_buffer *buffer,
				       ion_phys_addr_t *addr, size_t *len)
{
	*addr = virt_to_phys(buffer->priv_virt);
	*len = buffer->size;
	return 0;
}

struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
						struct ion_buffer *buffer)
{
	struct sg_table *table;
	int ret;

	table = kzalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		return ERR_PTR(-ENOMEM);
	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret) {
		kfree(table);
		return ERR_PTR(ret);
	}
	sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
		    0);
	return table;
}

void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
				      struct ion_buffer *buffer)
{
	sg_free_table(buffer->sg_table);
	kfree(buffer->sg_table);
}

int ion_system_contig_heap_map_user(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    struct vm_area_struct *vma)
{
	unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));

	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.phys = ion_system_contig_heap_phys,
	.map_dma = ion_system_contig_heap_map_dma,
	.unmap_dma = ion_system_contig_heap_unmap_dma,
	.map_kernel = ion_system_heap_map_kernel,
	.unmap_kernel = ion_system_heap_unmap_kernel,
	.map_user = ion_system_contig_heap_map_user,
};

struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	return heap;
}

void ion_system_contig_heap_destroy(struct ion_heap *heap)
{
	kfree(heap);
}