ion: fix sparse warnings
[firefly-linux-kernel-4.4.55.git] drivers/staging/android/ion/ion_system_heap.c
index 7397bc41fb45f0d869107a2cffcfb86e9feb1e93..301d019a32704a61b60795fceeba5fdb95ec55f2 100644
 #include <linux/highmem.h>
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
+#include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include "ion.h"
 #include "ion_priv.h"
 
+static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
+                                    __GFP_NORETRY) & ~__GFP_WAIT;
+static gfp_t low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN);
+static const unsigned int orders[] = {8, 4, 0};
+static const int num_orders = ARRAY_SIZE(orders);
+
+static int order_to_index(unsigned int order)
+{
+       int i;
+
+       for (i = 0; i < num_orders; i++)
+               if (order == orders[i])
+                       return i;
+       BUG();
+       return -1;
+}
+
+static unsigned int order_to_size(int order)
+{
+       return PAGE_SIZE << order;
+}
+
+struct ion_system_heap {
+       struct ion_heap heap;
+       struct ion_page_pool **pools;
+};
+
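One pool is kept per entry in orders[], tried from largest to smallest, so uncached allocations can recycle 1 MiB, 64 KiB and 4 KiB chunks independently (sizes assume the usual 4 KiB PAGE_SIZE). A standalone userspace sketch of the order-to-size mapping, not kernel code:

    #include <stdio.h>

    #define PAGE_SIZE 4096u

    static const unsigned int orders[] = {8, 4, 0};

    int main(void)
    {
            int i;

            /* order n covers 2^n contiguous pages, cf. order_to_size() */
            for (i = 0; i < 3; i++)
                    printf("order %u -> %u bytes\n", orders[i],
                           PAGE_SIZE << orders[i]);
            return 0;
    }

This prints 1048576, 65536 and 4096 bytes for orders 8, 4 and 0 respectively.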
 struct page_info {
        struct page *page;
-       unsigned long order;
+       unsigned int order;
        struct list_head list;
 };
 
-static struct page_info *alloc_largest_available(unsigned long size)
+static struct page *alloc_buffer_page(struct ion_system_heap *heap,
+                                     struct ion_buffer *buffer,
+                                     unsigned long order)
+{
+       bool cached = ion_buffer_cached(buffer);
+       struct ion_page_pool *pool = heap->pools[order_to_index(order)];
+       struct page *page;
+
+       if (!cached) {
+               page = ion_page_pool_alloc(pool);
+       } else {
+               gfp_t gfp_flags = low_order_gfp_flags;
+
+               if (order > 4)
+                       gfp_flags = high_order_gfp_flags;
+               page = alloc_pages(gfp_flags, order);
+               if (!page)
+                       return NULL;
+               ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
+                                               DMA_BIDIRECTIONAL);
+       }
+       if (!page)
+               return NULL;
+
+       return page;
+}
+
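The gfp split above is deliberate: order-8 attempts drop __GFP_WAIT and add __GFP_NORETRY, so a failed 1 MiB allocation fails fast and silently instead of stalling in reclaim, and alloc_largest_available() below simply retries at the next smaller order. A hypothetical helper making the policy explicit (order_to_gfp() is not part of this patch):

    /* hypothetical helper mirroring the open-coded test above */
    static inline gfp_t order_to_gfp(unsigned int order)
    {
            /* orders above 4 must not sleep in reclaim or retry */
            return order > 4 ? high_order_gfp_flags : low_order_gfp_flags;
    }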
+static void free_buffer_page(struct ion_system_heap *heap,
+                            struct ion_buffer *buffer, struct page *page,
+                            unsigned int order)
+{
+       bool cached = ion_buffer_cached(buffer);
+
+       if (!cached) {
+               struct ion_page_pool *pool = heap->pools[order_to_index(order)];
+               ion_page_pool_free(pool, page);
+       } else {
+               __free_pages(page, order);
+       }
+}
+
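Only uncached buffers round-trip through the pools; cached pages would need a cache clean on every recycle, so they go straight back to the buddy allocator. A minimal sketch of the pool life cycle using the ion_page_pool API from ion_priv.h, error handling elided and assuming the pool refills itself from the buddy allocator when empty:

    static void pool_roundtrip_sketch(void)
    {
            struct ion_page_pool *pool;
            struct page *page;

            pool = ion_page_pool_create(low_order_gfp_flags, 0);
            page = ion_page_pool_alloc(pool);
            ion_page_pool_free(pool, page);  /* kept for the next alloc */
            ion_page_pool_destroy(pool);
    }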
+static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
+                                                struct ion_buffer *buffer,
+                                                unsigned long size,
+                                                unsigned int max_order)
 {
-       static unsigned int orders[] = {8, 4, 0};
        struct page *page;
        struct page_info *info;
        int i;
 
-       for (i = 0; i < ARRAY_SIZE(orders); i++) {
-               if (size < (1 << orders[i]) * PAGE_SIZE)
+       for (i = 0; i < num_orders; i++) {
+               if (size < order_to_size(orders[i]))
+                       continue;
+               if (max_order < orders[i])
                        continue;
-               page = alloc_pages(GFP_HIGHUSER | __GFP_ZERO |
-                                  __GFP_NOWARN | __GFP_NORETRY, orders[i]);
+
+               page = alloc_buffer_page(heap, buffer, orders[i]);
                if (!page)
                        continue;
-               split_page(page, orders[i]);
-               info = kmap(page);
+
+               info = kmalloc(sizeof(struct page_info), GFP_KERNEL);
+               if (!info) {
+                       free_buffer_page(heap, buffer, page, orders[i]);
+                       return NULL;
+               }
                info->page = page;
                info->order = orders[i];
                return info;
@@ -59,170 +131,224 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
                                     unsigned long size, unsigned long align,
                                     unsigned long flags)
 {
+       struct ion_system_heap *sys_heap = container_of(heap,
+                                                       struct ion_system_heap,
+                                                       heap);
        struct sg_table *table;
        struct scatterlist *sg;
        int ret;
        struct list_head pages;
        struct page_info *info, *tmp_info;
-       int i;
+       int i = 0;
        long size_remaining = PAGE_ALIGN(size);
+       unsigned int max_order = orders[0];
+
+       if (align > PAGE_SIZE)
+               return -EINVAL;
 
        INIT_LIST_HEAD(&pages);
        while (size_remaining > 0) {
-               info = alloc_largest_available(size_remaining);
+               info = alloc_largest_available(sys_heap, buffer, size_remaining,
+                                              max_order);
                if (!info)
                        goto err;
                list_add_tail(&info->list, &pages);
                size_remaining -= (1 << info->order) * PAGE_SIZE;
+               max_order = info->order;
+               i++;
        }
-
-       table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+       table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!table)
                goto err;
 
-       ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE, GFP_KERNEL);
+       ret = sg_alloc_table(table, i, GFP_KERNEL);
        if (ret)
                goto err1;
 
        sg = table->sgl;
        list_for_each_entry_safe(info, tmp_info, &pages, list) {
                struct page *page = info->page;
-               for (i = 0; i < (1 << info->order); i++) {
-                       sg_set_page(sg, page + i, PAGE_SIZE, 0);
-                       sg = sg_next(sg);
-               }
+               sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE, 0);
+               sg = sg_next(sg);
                list_del(&info->list);
-               memset(info, 0, sizeof(struct page_info));
-               kunmap(page);
+               kfree(info);
        }
 
-       dma_sync_sg_for_device(NULL, table->sgl, table->nents,
-                              DMA_BIDIRECTIONAL);
-
        buffer->priv_virt = table;
        return 0;
 err1:
        kfree(table);
 err:
-       list_for_each_entry(info, &pages, list) {
-               for (i = 0; i < (1 << info->order); i++)
-                       __free_page(info->page + i);
-               kunmap(info->page);
+       list_for_each_entry_safe(info, tmp_info, &pages, list) {
+               free_buffer_page(sys_heap, buffer, info->page, info->order);
+               kfree(info);
        }
        return -ENOMEM;
 }
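The allocation loop above is a greedy decomposition: take the largest chunk that still fits, and never exceed the order of the previous chunk (max_order), so chunk sizes are non-increasing and a once-failed high order is never retried. The same arithmetic as a standalone userspace sketch, with a 4 KiB PAGE_SIZE assumed and size already page-aligned:

    #include <stdio.h>

    #define PAGE_SIZE 4096ul

    static const unsigned int orders[] = {8, 4, 0};

    int main(void)
    {
            unsigned long size_remaining = (1ul << 20) + 5 * PAGE_SIZE;
            unsigned int max_order = orders[0];
            int i;

            while (size_remaining > 0) {
                    for (i = 0; i < 3; i++) {
                            unsigned long chunk = PAGE_SIZE << orders[i];

                            if (size_remaining < chunk ||
                                max_order < orders[i])
                                    continue;
                            printf("order %u chunk (%lu bytes)\n",
                                   orders[i], chunk);
                            size_remaining -= chunk;
                            max_order = orders[i];
                            break;
                    }
            }
            return 0;
    }

For 1 MiB + 20 KiB this emits one order-8 chunk followed by five order-0 chunks, which is also why sg_alloc_table() above only needs i entries rather than one per page.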
 
-void ion_system_heap_free(struct ion_buffer *buffer)
+static void ion_system_heap_free(struct ion_buffer *buffer)
 {
-       int i;
+       struct ion_heap *heap = buffer->heap;
+       struct ion_system_heap *sys_heap = container_of(heap,
+                                                       struct ion_system_heap,
+                                                       heap);
+       struct sg_table *table = buffer->sg_table;
+       bool cached = ion_buffer_cached(buffer);
        struct scatterlist *sg;
-       struct sg_table *table = buffer->priv_virt;
+       LIST_HEAD(pages);
+       int i;
+
+       /*
+        * uncached pages come from the page pools, zero them before returning
+        * for security purposes (other allocations are zeroed at alloc time)
+        */
+       if (!cached)
+               ion_heap_buffer_zero(buffer);
 
        for_each_sg(table->sgl, sg, table->nents, i)
-               __free_pages(sg_page(sg), get_order(sg_dma_len(sg)));
-       if (buffer->sg_table)
-               sg_free_table(buffer->sg_table);
-       kfree(buffer->sg_table);
+               free_buffer_page(sys_heap, buffer, sg_page(sg),
+                               get_order(sg->length));
+       sg_free_table(table);
+       kfree(table);
 }
 
-struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
-                                        struct ion_buffer *buffer)
+static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
+                                               struct ion_buffer *buffer)
 {
        return buffer->priv_virt;
 }
 
-void ion_system_heap_unmap_dma(struct ion_heap *heap,
-                              struct ion_buffer *buffer)
+static void ion_system_heap_unmap_dma(struct ion_heap *heap,
+                                     struct ion_buffer *buffer)
 {
        return;
 }
 
-void *ion_system_heap_map_kernel(struct ion_heap *heap,
-                                struct ion_buffer *buffer)
-{
-       struct scatterlist *sg;
-       int i, j;
-       void *vaddr;
-       pgprot_t pgprot;
-       struct sg_table *table = buffer->priv_virt;
-       int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
-       struct page **pages = kzalloc(sizeof(struct page *) * npages,
-                                    GFP_KERNEL);
-       struct page **tmp = pages;
-
-       if (buffer->flags & ION_FLAG_CACHED)
-               pgprot = PAGE_KERNEL;
-       else
-               pgprot = pgprot_writecombine(PAGE_KERNEL);
-
-       for_each_sg(table->sgl, sg, table->nents, i) {
-               int npages_this_entry = PAGE_ALIGN(sg_dma_len(sg)) / PAGE_SIZE;
-               struct page *page = sg_page(sg);
-               BUG_ON(i >= npages);
-               for (j = 0; j < npages_this_entry; j++) {
-                       *(tmp++) = page++;
-               }
+static struct ion_heap_ops system_heap_ops = {
+       .allocate = ion_system_heap_allocate,
+       .free = ion_system_heap_free,
+       .map_dma = ion_system_heap_map_dma,
+       .unmap_dma = ion_system_heap_unmap_dma,
+       .map_kernel = ion_heap_map_kernel,
+       .unmap_kernel = ion_heap_unmap_kernel,
+       .map_user = ion_heap_map_user,
+};
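Note the last three callbacks: the heap now delegates kernel and user mappings to the shared ion_heap_map_kernel()/ion_heap_unmap_kernel()/ion_heap_map_user() helpers in ion_heap.c, which is why the per-heap implementations are deleted in this hunk. The ION core only ever reaches the heap through this table, roughly:

    /* simplified; the real call sites are in ion.c */
    if (buffer->heap->ops->map_kernel)
            vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);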
+
+static int ion_system_heap_shrink(struct shrinker *shrinker,
+                                 struct shrink_control *sc)
+{
+       struct ion_heap *heap = container_of(shrinker, struct ion_heap,
+                                            shrinker);
+       struct ion_system_heap *sys_heap = container_of(heap,
+                                                       struct ion_system_heap,
+                                                       heap);
+       int nr_total = 0;
+       int nr_freed = 0;
+       int i;
+
+       if (sc->nr_to_scan == 0)
+               goto end;
+
+       /*
+        * shrink the free list first, no point in zeroing the memory if
+        * we're just going to reclaim it
+        */
+       nr_freed += ion_heap_freelist_drain(heap, sc->nr_to_scan * PAGE_SIZE) /
+               PAGE_SIZE;
+
+       if (nr_freed >= sc->nr_to_scan)
+               goto end;
+
+       for (i = 0; i < num_orders; i++) {
+               struct ion_page_pool *pool = sys_heap->pools[i];
+
+               nr_freed += ion_page_pool_shrink(pool, sc->gfp_mask,
+                                                sc->nr_to_scan);
+               if (nr_freed >= sc->nr_to_scan)
+                       break;
        }
-       vaddr = vmap(pages, npages, VM_MAP, pgprot);
-       kfree(pages);
 
-       return vaddr;
-}
+end:
+       /*
+        * total number of items is whatever the page pools are holding
+        * plus whatever's in the freelist
+        */
+       for (i = 0; i < num_orders; i++) {
+               struct ion_page_pool *pool = sys_heap->pools[i];
+               nr_total += ion_page_pool_shrink(pool, sc->gfp_mask, 0);
+       }
+       nr_total += ion_heap_freelist_size(heap) / PAGE_SIZE;
+       return nr_total;
 
-void ion_system_heap_unmap_kernel(struct ion_heap *heap,
-                                 struct ion_buffer *buffer)
-{
-       vunmap(buffer->vaddr);
 }
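This implements the single-callback shrinker contract of this kernel generation: called with sc->nr_to_scan == 0 the function only reports how many reclaimable items exist, otherwise it frees up to nr_to_scan and returns the new total. The same convention is reused at the end label, where ion_page_pool_shrink(pool, mask, 0) acts as a pure count query. A minimal sketch of the contract, with hypothetical example_* helpers:

    static int example_shrink(struct shrinker *s, struct shrink_control *sc)
    {
            if (sc->nr_to_scan)
                    example_reclaim(sc->nr_to_scan);  /* hypothetical */
            return example_count();                   /* hypothetical */
    }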
 
-int ion_system_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
-                            struct vm_area_struct *vma)
+static int ion_system_heap_debug_show(struct ion_heap *heap,
+                                     struct seq_file *s, void *unused)
 {
-       struct sg_table *table = buffer->priv_virt;
-       unsigned long addr = vma->vm_start;
-       unsigned long offset = vma->vm_pgoff;
-       struct scatterlist *sg;
-       int i;
 
-       for_each_sg(table->sgl, sg, table->nents, i) {
-               if (offset) {
-                       offset--;
-                       continue;
-               }
-               remap_pfn_range(vma, addr, page_to_pfn(sg_page(sg)),
-                               sg_dma_len(sg), vma->vm_page_prot);
-               addr += sg_dma_len(sg);
-               if (addr >= vma->vm_end)
-                       return 0;
+       struct ion_system_heap *sys_heap = container_of(heap,
+                                                       struct ion_system_heap,
+                                                       heap);
+       int i;
+
+       for (i = 0; i < num_orders; i++) {
+               struct ion_page_pool *pool = sys_heap->pools[i];
+               seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
+                          pool->high_count, pool->order,
+                          (1 << pool->order) * PAGE_SIZE * pool->high_count);
+               seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n",
+                          pool->low_count, pool->order,
+                          (1 << pool->order) * PAGE_SIZE * pool->low_count);
        }
        return 0;
 }
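With a 4 KiB PAGE_SIZE the resulting debugfs output looks like the following (counts purely illustrative):

    2 order 8 highmem pages in pool = 2097152 total
    3 order 8 lowmem pages in pool = 3145728 total
    0 order 4 highmem pages in pool = 0 total
    1 order 4 lowmem pages in pool = 65536 total
    0 order 0 highmem pages in pool = 0 total
    8 order 0 lowmem pages in pool = 32768 total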
 
-static struct ion_heap_ops vmalloc_ops = {
-       .allocate = ion_system_heap_allocate,
-       .free = ion_system_heap_free,
-       .map_dma = ion_system_heap_map_dma,
-       .unmap_dma = ion_system_heap_unmap_dma,
-       .map_kernel = ion_system_heap_map_kernel,
-       .unmap_kernel = ion_system_heap_unmap_kernel,
-       .map_user = ion_system_heap_map_user,
-};
-
 struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
 {
-       struct ion_heap *heap;
+       struct ion_system_heap *heap;
+       int i;
 
-       heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
+       heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL);
        if (!heap)
                return ERR_PTR(-ENOMEM);
-       heap->ops = &vmalloc_ops;
-       heap->type = ION_HEAP_TYPE_SYSTEM;
-       return heap;
+       heap->heap.ops = &system_heap_ops;
+       heap->heap.type = ION_HEAP_TYPE_SYSTEM;
+       heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+       heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders,
+                             GFP_KERNEL);
+       if (!heap->pools)
+               goto err_alloc_pools;
+       for (i = 0; i < num_orders; i++) {
+               struct ion_page_pool *pool;
+               gfp_t gfp_flags = low_order_gfp_flags;
+
+               if (orders[i] > 4)
+                       gfp_flags = high_order_gfp_flags;
+               pool = ion_page_pool_create(gfp_flags, orders[i]);
+               if (!pool)
+                       goto err_create_pool;
+               heap->pools[i] = pool;
+       }
+
+       heap->heap.shrinker.shrink = ion_system_heap_shrink;
+       heap->heap.shrinker.seeks = DEFAULT_SEEKS;
+       heap->heap.shrinker.batch = 0;
+       register_shrinker(&heap->heap.shrinker);
+       heap->heap.debug_show = ion_system_heap_debug_show;
+       return &heap->heap;
+err_create_pool:
+       for (i = 0; i < num_orders; i++)
+               if (heap->pools[i])
+                       ion_page_pool_destroy(heap->pools[i]);
+       kfree(heap->pools);
+err_alloc_pools:
+       kfree(heap);
+       return ERR_PTR(-ENOMEM);
 }
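On any failure the constructor unwinds what it built and returns ERR_PTR(-ENOMEM), so callers must use the IS_ERR() convention rather than a NULL check:

    struct ion_heap *heap = ion_system_heap_create(NULL);

    if (IS_ERR(heap))
            return PTR_ERR(heap);  /* -ENOMEM on any allocation failure */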
 
 void ion_system_heap_destroy(struct ion_heap *heap)
 {
-       kfree(heap);
+       struct ion_system_heap *sys_heap = container_of(heap,
+                                                       struct ion_system_heap,
+                                                       heap);
+       int i;
+
+       for (i = 0; i < num_orders; i++)
+               ion_page_pool_destroy(sys_heap->pools[i]);
+       kfree(sys_heap->pools);
+       kfree(sys_heap);
 }
 
 static int ion_system_contig_heap_allocate(struct ion_heap *heap,
@@ -231,61 +357,83 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap,
                                           unsigned long align,
                                           unsigned long flags)
 {
-       buffer->priv_virt = kzalloc(len, GFP_KERNEL);
-       if (!buffer->priv_virt)
+       int order = get_order(len);
+       struct page *page;
+       struct sg_table *table;
+       unsigned long i;
+       int ret;
+
+       if (align > (PAGE_SIZE << order))
+               return -EINVAL;
+
+       page = alloc_pages(low_order_gfp_flags, order);
+       if (!page)
                return -ENOMEM;
+
+       split_page(page, order);
+
+       len = PAGE_ALIGN(len);
+       for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
+               __free_page(page + i);
+
+       table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+       if (!table) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       ret = sg_alloc_table(table, 1, GFP_KERNEL);
+       if (ret)
+               goto out;
+
+       sg_set_page(table->sgl, page, len, 0);
+
+       buffer->priv_virt = table;
+
+       ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);
+
        return 0;
+
+out:
+       for (i = 0; i < len >> PAGE_SHIFT; i++)
+               __free_page(page + i);
+       kfree(table);
+       return ret;
 }
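The contiguous path rounds the request up to one high-order block, split_page()s it into order-0 pages, and immediately hands the unused tail back to the buddy allocator. The arithmetic as a standalone userspace sketch, with 4 KiB pages assumed and get_order_demo() standing in for the kernel's get_order():

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE (1ul << PAGE_SHIFT)

    static int get_order_demo(unsigned long len)
    {
            int order = 0;

            while ((PAGE_SIZE << order) < len)
                    order++;
            return order;
    }

    int main(void)
    {
            unsigned long len = 20 * 1024;  /* 20 KiB request */
            int order = get_order_demo(len);

            printf("order %d block (%lu pages), keep %lu, free %lu tail\n",
                   order, 1ul << order, len >> PAGE_SHIFT,
                   (1ul << order) - (len >> PAGE_SHIFT));
            return 0;
    }

A 20 KiB request takes an order-3 (32 KiB) block, keeps five pages and frees the three tail pages.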
 
-void ion_system_contig_heap_free(struct ion_buffer *buffer)
+static void ion_system_contig_heap_free(struct ion_buffer *buffer)
 {
-       kfree(buffer->priv_virt);
+       struct sg_table *table = buffer->priv_virt;
+       struct page *page = sg_page(table->sgl);
+       unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
+       unsigned long i;
+
+       for (i = 0; i < pages; i++)
+               __free_page(page + i);
+       sg_free_table(table);
+       kfree(table);
 }
 
 static int ion_system_contig_heap_phys(struct ion_heap *heap,
                                       struct ion_buffer *buffer,
                                       ion_phys_addr_t *addr, size_t *len)
 {
-       *addr = virt_to_phys(buffer->priv_virt);
+       struct sg_table *table = buffer->priv_virt;
+       struct page *page = sg_page(table->sgl);
+       *addr = page_to_phys(page);
        *len = buffer->size;
        return 0;
 }
 
-struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
+static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
                                                struct ion_buffer *buffer)
 {
-       struct sg_table *table;
-       int ret;
-
-       table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
-       if (!table)
-               return ERR_PTR(-ENOMEM);
-       ret = sg_alloc_table(table, 1, GFP_KERNEL);
-       if (ret) {
-               kfree(table);
-               return ERR_PTR(ret);
-       }
-       sg_set_page(table->sgl, virt_to_page(buffer->priv_virt), buffer->size,
-                   0);
-       return table;
-}
-
-void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
-                                     struct ion_buffer *buffer)
-{
-       sg_free_table(buffer->sg_table);
-       kfree(buffer->sg_table);
+       return buffer->priv_virt;
 }
 
-int ion_system_contig_heap_map_user(struct ion_heap *heap,
-                                   struct ion_buffer *buffer,
-                                   struct vm_area_struct *vma)
+static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
+                                            struct ion_buffer *buffer)
 {
-       unsigned long pfn = __phys_to_pfn(virt_to_phys(buffer->priv_virt));
-       return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
-                              vma->vm_end - vma->vm_start,
-                              vma->vm_page_prot);
-
 }
 
 static struct ion_heap_ops kmalloc_ops = {
@@ -294,9 +442,9 @@ static struct ion_heap_ops kmalloc_ops = {
        .phys = ion_system_contig_heap_phys,
        .map_dma = ion_system_contig_heap_map_dma,
        .unmap_dma = ion_system_contig_heap_unmap_dma,
-       .map_kernel = ion_system_heap_map_kernel,
-       .unmap_kernel = ion_system_heap_unmap_kernel,
-       .map_user = ion_system_contig_heap_map_user,
+       .map_kernel = ion_heap_map_kernel,
+       .unmap_kernel = ion_heap_unmap_kernel,
+       .map_user = ion_heap_map_user,
 };
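kmalloc_ops picks up the same shared mapping helpers as system_heap_ops above. For reference, the shared ion_heap_map_kernel() does essentially what the per-heap version deleted earlier did; a sketch under that assumption (the real body lives in ion_heap.c):

    static void *map_kernel_sketch(struct ion_buffer *buffer)
    {
            struct sg_table *table = buffer->sg_table;
            int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
            struct page **pages = vmalloc(sizeof(struct page *) * npages);
            struct page **tmp = pages;
            pgprot_t pgprot = (buffer->flags & ION_FLAG_CACHED) ?
                    PAGE_KERNEL : pgprot_writecombine(PAGE_KERNEL);
            struct scatterlist *sg;
            void *vaddr;
            int i, j;

            if (!pages)
                    return NULL;
            for_each_sg(table->sgl, sg, table->nents, i) {
                    int n = PAGE_ALIGN(sg->length) / PAGE_SIZE;
                    struct page *page = sg_page(sg);

                    for (j = 0; j < n; j++)
                            *(tmp++) = page++;
            }
            vaddr = vmap(pages, npages, VM_MAP, pgprot);
            vfree(pages);
            return vaddr;
    }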
 
 struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)