ion: fix sparse warnings
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index c775e4f2c5a5ead2357487dbe24c6fcfb13d46ec..559e4ee6f7e5c0137cfc21e75c9fc25012990a8e 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
 
 #include <linux/device.h>
 #include <linux/file.h>
+#include <linux/freezer.h>
 #include <linux/fs.h>
 #include <linux/anon_inodes.h>
+#include <linux/kthread.h>
 #include <linux/list.h>
 #include <linux/memblock.h>
 #include <linux/miscdevice.h>
 #include <linux/mm.h>
 #include <linux/mm_types.h>
 #include <linux/rbtree.h>
-#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/seq_file.h>
 #include <linux/uaccess.h>
+#include <linux/vmalloc.h>
 #include <linux/debugfs.h>
 #include <linux/dma-buf.h>
+#include <linux/idr.h>
 
 #include "ion.h"
 #include "ion_priv.h"
+#include "compat_ion.h"
 
 /**
  * struct ion_device - the metadata of the ion device node
@@ -62,8 +66,8 @@ struct ion_device {
  * @node:              node in the tree of all clients
  * @dev:               backpointer to ion device
  * @handles:           an rb tree of all the handles in this client
+ * @idr:               an idr space for allocating handle ids
  * @lock:              lock protecting the tree of handles
- * @heap_type_mask:    mask of all supported heap types
  * @name:              used for debugging
  * @task:              used for debugging
  *
@@ -75,8 +79,8 @@ struct ion_client {
        struct rb_node node;
        struct ion_device *dev;
        struct rb_root handles;
+       struct idr idr;
        struct mutex lock;
-       unsigned int heap_type_mask;
        const char *name;
        struct task_struct *task;
        pid_t pid;
@@ -90,7 +94,7 @@ struct ion_client {
  * @buffer:            pointer to the buffer
  * @node:              node in the client's handle rbtree
  * @kmap_cnt:          count of times this client has mapped to kernel
- * @dmap_cnt:          count of times this client has mapped for dma
+ * @id:                client-unique id allocated by client->idr
  *
  * Modifications to node, map_cnt or mapping should be protected by the
  * lock in the client.  Other fields are never changed after initialization.
@@ -101,17 +105,38 @@ struct ion_handle {
        struct ion_buffer *buffer;
        struct rb_node node;
        unsigned int kmap_cnt;
+       int id;
 };
 
 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
 {
-        return ((buffer->flags & ION_FLAG_CACHED) &&
-                !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
+       return ((buffer->flags & ION_FLAG_CACHED) &&
+               !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
 }
 
 bool ion_buffer_cached(struct ion_buffer *buffer)
 {
-        return !!(buffer->flags & ION_FLAG_CACHED);
+       return !!(buffer->flags & ION_FLAG_CACHED);
+}
+
+static inline struct page *ion_buffer_page(struct page *page)
+{
+       return (struct page *)((unsigned long)page & ~(1UL));
+}
+
+static inline bool ion_buffer_page_is_dirty(struct page *page)
+{
+       return !!((unsigned long)page & 1UL);
+}
+
+static inline void ion_buffer_page_dirty(struct page **page)
+{
+       *page = (struct page *)((unsigned long)(*page) | 1UL);
+}
+
+static inline void ion_buffer_page_clean(struct page **page)
+{
+       *page = (struct page *)((unsigned long)(*page) & ~(1UL));
 }
 
 /* this function should only be called while dev->lock is held */
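
Because struct page pointers are at least word-aligned, bit 0 is always clear
and can be borrowed as a per-page dirty flag; the four helpers above replace
the separate buffer->dirty bitmap that this patch removes. A minimal
round-trip sketch (hypothetical locals; ion_pages_sync_for_device() is added
later in this same patch):

        struct page **slot = &buffer->pages[i];

        ion_buffer_page_dirty(slot);                    /* set tag bit 0 */
        if (ion_buffer_page_is_dirty(*slot))            /* test tag bit 0 */
                ion_pages_sync_for_device(dev, ion_buffer_page(*slot),
                                          PAGE_SIZE, DMA_BIDIRECTIONAL);
        ion_buffer_page_clean(slot);                    /* clear tag bit 0 */

The tag must be stripped with ion_buffer_page() before the pointer is
dereferenced or handed to any mm API.
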
@@ -140,8 +165,6 @@ static void ion_buffer_add(struct ion_device *dev,
        rb_insert_color(&buffer->node, &dev->buffers);
 }
 
-static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);
-
 /* this function should only be called while dev->lock is held */
 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
                                     struct ion_device *dev,
@@ -163,33 +186,48 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
        kref_init(&buffer->ref);
 
        ret = heap->ops->allocate(heap, buffer, len, align, flags);
+
        if (ret) {
-               kfree(buffer);
-               return ERR_PTR(ret);
+               if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
+                       goto err2;
+
+               ion_heap_freelist_drain(heap, 0);
+               ret = heap->ops->allocate(heap, buffer, len, align,
+                                         flags);
+               if (ret)
+                       goto err2;
        }
 
        buffer->dev = dev;
        buffer->size = len;
 
        table = heap->ops->map_dma(heap, buffer);
-       if (IS_ERR_OR_NULL(table)) {
+       if (WARN_ONCE(table == NULL,
+                       "heap->ops->map_dma should return ERR_PTR on error"))
+               table = ERR_PTR(-EINVAL);
+       if (IS_ERR(table)) {
                heap->ops->free(buffer);
                kfree(buffer);
                return ERR_PTR(PTR_ERR(table));
        }
        buffer->sg_table = table;
        if (ion_buffer_fault_user_mappings(buffer)) {
-               for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
-                           i) {
-                       if (sg_dma_len(sg) == PAGE_SIZE)
-                               continue;
-                       pr_err("%s: cached mappings that will be faulted in "
-                              "must have pagewise sg_lists\n", __func__);
-                       ret = -EINVAL;
-                       goto err;
+               int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+               struct scatterlist *sg;
+               int i, j, k = 0;
+
+               buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
+               if (!buffer->pages) {
+                       ret = -ENOMEM;
+                       goto err1;
+               }
+
+               for_each_sg(table->sgl, sg, table->nents, i) {
+                       struct page *page = sg_page(sg);
+
+                       for (j = 0; j < sg->length / PAGE_SIZE; j++)
+                               buffer->pages[k++] = page++;
                }
 
-               ret = ion_buffer_alloc_dirty(buffer);
                if (ret)
                        goto err;
        }
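
With ION_HEAP_FLAG_DEFER_FREE, freed buffers are parked on a per-heap freelist
and reclaimed by a kernel thread (hence the new kthread.h and freezer.h
includes), so a failed allocation first drains that freelist and retries once.
Two error-path nits remain visible in this hunk: ret is necessarily 0 at the
surviving "if (ret) goto err;" (left over from the deleted
ion_buffer_alloc_dirty() call), and the vmalloc() failure jumps to err1, which
skips unmap_dma and the heap free and so leaks the just-made allocation. A
possible fix, as a sketch only (not part of this patch):

                buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
                if (!buffer->pages) {
                        ret = -ENOMEM;
                        goto err;       /* full unwind: unmap_dma + free + kfree */
                }

with the dead "if (ret)" test dropped.
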
@@ -216,25 +254,39 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 err:
        heap->ops->unmap_dma(heap, buffer);
        heap->ops->free(buffer);
+err1:
+       if (buffer->pages)
+               vfree(buffer->pages);
+err2:
        kfree(buffer);
        return ERR_PTR(ret);
 }
 
-static void ion_buffer_destroy(struct kref *kref)
+void ion_buffer_destroy(struct ion_buffer *buffer)
 {
-       struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
-       struct ion_device *dev = buffer->dev;
-
        if (WARN_ON(buffer->kmap_cnt > 0))
                buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
        buffer->heap->ops->unmap_dma(buffer->heap, buffer);
        buffer->heap->ops->free(buffer);
+       if (buffer->pages)
+               vfree(buffer->pages);
+       kfree(buffer);
+}
+
+static void _ion_buffer_destroy(struct kref *kref)
+{
+       struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
+       struct ion_heap *heap = buffer->heap;
+       struct ion_device *dev = buffer->dev;
+
        mutex_lock(&dev->buffer_lock);
        rb_erase(&buffer->node, &dev->buffers);
        mutex_unlock(&dev->buffer_lock);
-       if (buffer->flags & ION_FLAG_CACHED)
-               kfree(buffer->dirty);
-       kfree(buffer);
+
+       if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+               ion_heap_freelist_add(heap, buffer);
+       else
+               ion_buffer_destroy(buffer);
 }
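
The destroy path is split so the raw teardown, ion_buffer_destroy(), can be
called without going through a kref: _ion_buffer_destroy() is the kref
release, which unlinks the buffer from dev->buffers and then either frees it
immediately or parks it on the heap's freelist. The consumer side lives in
ion_heap.c in this series; roughly, with ion_heap_freelist_pop() as a
hypothetical name for the worker's dequeue step:

        /* deferred-free worker, sketched */
        while ((buffer = ion_heap_freelist_pop(heap)))  /* hypothetical helper */
                ion_buffer_destroy(buffer);             /* raw teardown, no kref */
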
 
 static void ion_buffer_get(struct ion_buffer *buffer)
@@ -244,7 +296,7 @@ static void ion_buffer_get(struct ion_buffer *buffer)
 
 static int ion_buffer_put(struct ion_buffer *buffer)
 {
-       return kref_put(&buffer->ref, ion_buffer_destroy);
+       return kref_put(&buffer->ref, _ion_buffer_destroy);
 }
 
 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
@@ -309,6 +361,7 @@ static void ion_handle_destroy(struct kref *kref)
                ion_handle_kmap_put(handle);
        mutex_unlock(&buffer->lock);
 
+       idr_remove(&client->idr, handle->id);
        if (!RB_EMPTY_NODE(&handle->node))
                rb_erase(&handle->node, &client->handles);
 
@@ -330,53 +383,73 @@ static void ion_handle_get(struct ion_handle *handle)
 
 static int ion_handle_put(struct ion_handle *handle)
 {
-       return kref_put(&handle->ref, ion_handle_destroy);
-}
+       struct ion_client *client = handle->client;
+       int ret;
 
-static struct ion_handle *ion_handle_lookup(struct ion_client *client,
-                                           struct ion_buffer *buffer)
-{
-       struct rb_node *n;
+       mutex_lock(&client->lock);
+       ret = kref_put(&handle->ref, ion_handle_destroy);
+       mutex_unlock(&client->lock);
 
-       for (n = rb_first(&client->handles); n; n = rb_next(n)) {
-               struct ion_handle *handle = rb_entry(n, struct ion_handle,
-                                                    node);
-               if (handle->buffer == buffer)
-                       return handle;
-       }
-       return NULL;
+       return ret;
 }
 
-static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
+static struct ion_handle *ion_handle_lookup(struct ion_client *client,
+                                           struct ion_buffer *buffer)
 {
        struct rb_node *n = client->handles.rb_node;
 
        while (n) {
-               struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
-                                                         node);
-               if (handle < handle_node)
+               struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
+               if (buffer < entry->buffer)
                        n = n->rb_left;
-               else if (handle > handle_node)
+               else if (buffer > entry->buffer)
                        n = n->rb_right;
                else
-                       return true;
+                       return entry;
        }
-       return false;
+       return ERR_PTR(-EINVAL);
+}
+
+static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
+                                               int id)
+{
+       struct ion_handle *handle;
+
+       mutex_lock(&client->lock);
+       handle = idr_find(&client->idr, id);
+       if (handle)
+               ion_handle_get(handle);
+       mutex_unlock(&client->lock);
+
+       return handle ? handle : ERR_PTR(-EINVAL);
 }
 
-static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
+static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
+{
+       WARN_ON(!mutex_is_locked(&client->lock));
+       return (idr_find(&client->idr, handle->id) == handle);
+}
+
+static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
 {
+       int id;
        struct rb_node **p = &client->handles.rb_node;
        struct rb_node *parent = NULL;
        struct ion_handle *entry;
 
+       id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
+       if (id < 0)
+               return id;
+
+       handle->id = id;
+
        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct ion_handle, node);
 
-               if (handle < entry)
+               if (handle->buffer < entry->buffer)
                        p = &(*p)->rb_left;
-               else if (handle > entry)
+               else if (handle->buffer > entry->buffer)
                        p = &(*p)->rb_right;
                else
                        WARN(1, "%s: buffer already found.", __func__);
@@ -384,6 +457,8 @@ static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
 
        rb_link_node(&handle->node, parent, p);
        rb_insert_color(&handle->node, &client->handles);
+
+       return 0;
 }
 
 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
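
Handles are now exposed to userspace as small integer ids allocated from the
client's idr instead of raw kernel pointers, which makes ion_handle_validate()
a cheap idr lookup rather than an rbtree pointer search. The rbtree survives,
but keyed by buffer, so ion_import_dma_buf() can find an existing handle for a
re-imported buffer. The resulting lookup pattern, sketched for a hypothetical
caller:

        struct ion_handle *handle;

        handle = ion_handle_get_by_id(client, id);      /* takes a reference */
        if (IS_ERR(handle))
                return PTR_ERR(handle);
        /* ... operate on handle->buffer ... */
        ion_handle_put(handle);                         /* drop the reference */
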
@@ -394,8 +469,9 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
        struct ion_device *dev = client->dev;
        struct ion_buffer *buffer = NULL;
        struct ion_heap *heap;
+       int ret;
 
-       pr_debug("%s: len %d align %d heap_id_mask %u flags %x\n", __func__,
+       pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
                 len, align, heap_id_mask, flags);
        /*
         * traverse the list of heaps available in this system in priority
@@ -403,21 +479,18 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
         * request of the caller allocate from it.  Repeat until allocate has
         * succeeded or all heaps have been tried
         */
-       if (WARN_ON(!len))
-               return ERR_PTR(-EINVAL);
-
        len = PAGE_ALIGN(len);
 
+       if (!len)
+               return ERR_PTR(-EINVAL);
+
        down_read(&dev->lock);
        plist_for_each_entry(heap, &dev->heaps, node) {
-               /* if the client doesn't support this heap type */
-               if (!((1 << heap->type) & client->heap_type_mask))
-                       continue;
                /* if the caller didn't specify this heap id */
                if (!((1 << heap->id) & heap_id_mask))
                        continue;
                buffer = ion_buffer_create(heap, dev, len, align, flags);
-               if (!IS_ERR_OR_NULL(buffer))
+               if (!IS_ERR(buffer))
                        break;
        }
        up_read(&dev->lock);
@@ -436,12 +509,16 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
         */
        ion_buffer_put(buffer);
 
-       if (!IS_ERR(handle)) {
-               mutex_lock(&client->lock);
-               ion_handle_add(client, handle);
-               mutex_unlock(&client->lock);
-       }
+       if (IS_ERR(handle))
+               return handle;
 
+       mutex_lock(&client->lock);
+       ret = ion_handle_add(client, handle);
+       mutex_unlock(&client->lock);
+       if (ret) {
+               ion_handle_put(handle);
+               handle = ERR_PTR(ret);
+       }
 
        return handle;
 }
@@ -461,8 +538,8 @@ void ion_free(struct ion_client *client, struct ion_handle *handle)
                mutex_unlock(&client->lock);
                return;
        }
-       ion_handle_put(handle);
        mutex_unlock(&client->lock);
+       ion_handle_put(handle);
 }
 EXPORT_SYMBOL(ion_free);
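
The put moves outside the critical section because ion_handle_put() now takes
client->lock itself; the old ordering would self-deadlock:

        mutex_lock(&client->lock);
        ion_handle_put(handle);         /* would re-take client->lock */
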
 
@@ -501,7 +578,9 @@ static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
                return buffer->vaddr;
        }
        vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
-       if (IS_ERR_OR_NULL(vaddr))
+       if (WARN_ONCE(vaddr == NULL,
+                       "heap->ops->map_kernel should return ERR_PTR on error"))
+               return ERR_PTR(-EINVAL);
+       if (IS_ERR(vaddr))
                return vaddr;
        buffer->vaddr = vaddr;
        buffer->kmap_cnt++;
@@ -518,7 +597,7 @@ static void *ion_handle_kmap_get(struct ion_handle *handle)
                return buffer->vaddr;
        }
        vaddr = ion_buffer_kmap_get(buffer);
-       if (IS_ERR_OR_NULL(vaddr))
+       if (IS_ERR(vaddr))
                return vaddr;
        handle->kmap_cnt++;
        return vaddr;
@@ -590,7 +669,7 @@ static int ion_debug_client_show(struct seq_file *s, void *unused)
        struct ion_client *client = s->private;
        struct rb_node *n;
        size_t sizes[ION_NUM_HEAP_IDS] = {0};
-       const char *names[ION_NUM_HEAP_IDS] = {0};
+       const char *names[ION_NUM_HEAP_IDS] = {NULL};
        int i;
 
        mutex_lock(&client->lock);
@@ -609,7 +688,7 @@ static int ion_debug_client_show(struct seq_file *s, void *unused)
        for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
                if (!names[i])
                        continue;
-               seq_printf(s, "%16.16s: %16u\n", names[i], sizes[i]);
+               seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
        }
        return 0;
 }
@@ -627,7 +706,6 @@ static const struct file_operations debug_client_fops = {
 };
 
 struct ion_client *ion_client_create(struct ion_device *dev,
-                                    unsigned int heap_type_mask,
                                     const char *name)
 {
        struct ion_client *client;
@@ -660,9 +738,9 @@ struct ion_client *ion_client_create(struct ion_device *dev,
 
        client->dev = dev;
        client->handles = RB_ROOT;
+       idr_init(&client->idr);
        mutex_init(&client->lock);
        client->name = name;
-       client->heap_type_mask = heap_type_mask;
        client->task = task;
        client->pid = pid;
 
@@ -688,6 +766,7 @@ struct ion_client *ion_client_create(struct ion_device *dev,
 
        return client;
 }
+EXPORT_SYMBOL(ion_client_create);
 
 void ion_client_destroy(struct ion_client *client)
 {
@@ -700,6 +779,9 @@ void ion_client_destroy(struct ion_client *client)
                                                     node);
                ion_handle_destroy(&handle->ref);
        }
+
+       idr_destroy(&client->idr);
+
        down_write(&dev->lock);
        if (client->task)
                put_task_struct(client->task);
@@ -751,15 +833,20 @@ static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
 {
 }
 
-static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
+void ion_pages_sync_for_device(struct device *dev, struct page *page,
+               size_t size, enum dma_data_direction dir)
 {
-       unsigned long pages = buffer->sg_table->nents;
-       unsigned long length = (pages + BITS_PER_LONG - 1)/BITS_PER_LONG;
+       struct scatterlist sg;
 
-       buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
-       if (!buffer->dirty)
-               return -ENOMEM;
-       return 0;
+       sg_init_table(&sg, 1);
+       sg_set_page(&sg, page, size, 0);
+       /*
+        * This is not correct - sg_dma_address needs a dma_addr_t that is valid
+        * for the the targeted device, but this works on the currently targeted
+        * hardware.
+        */
+       sg_dma_address(&sg) = page_to_phys(page);
+       dma_sync_sg_for_device(dev, &sg, 1, dir);
 }
 
 struct ion_vma_list {
@@ -771,9 +858,9 @@ static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
                                       struct device *dev,
                                       enum dma_data_direction dir)
 {
-       struct scatterlist *sg;
-       int i;
        struct ion_vma_list *vma_list;
+       int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+       int i;
 
        pr_debug("%s: syncing for device %s\n", __func__,
                 dev ? dev_name(dev) : "null");
@@ -782,11 +869,14 @@ static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
                return;
 
        mutex_lock(&buffer->lock);
-       for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
-               if (!test_bit(i, buffer->dirty))
-                       continue;
-               dma_sync_sg_for_device(dev, sg, 1, dir);
-               clear_bit(i, buffer->dirty);
+       for (i = 0; i < pages; i++) {
+               struct page *page = buffer->pages[i];
+
+               if (ion_buffer_page_is_dirty(page))
+                       ion_pages_sync_for_device(dev, ion_buffer_page(page),
+                                                       PAGE_SIZE, dir);
+
+               ion_buffer_page_clean(buffer->pages + i);
        }
        list_for_each_entry(vma_list, &buffer->vmas, list) {
                struct vm_area_struct *vma = vma_list->vma;
@@ -797,24 +887,22 @@ static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
        mutex_unlock(&buffer->lock);
 }
 
-int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        struct ion_buffer *buffer = vma->vm_private_data;
-       struct scatterlist *sg;
-       int i;
+       unsigned long pfn;
+       int ret;
 
        mutex_lock(&buffer->lock);
-       set_bit(vmf->pgoff, buffer->dirty);
+       ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
+       BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
 
-       for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
-               if (i != vmf->pgoff)
-                       continue;
-               dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
-               vm_insert_page(vma, (unsigned long)vmf->virtual_address,
-                              sg_page(sg));
-               break;
-       }
+       pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
+       ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
        mutex_unlock(&buffer->lock);
+       if (ret)
+               return VM_FAULT_ERROR;
+
        return VM_FAULT_NOPAGE;
 }
 
@@ -851,7 +939,7 @@ static void ion_vm_close(struct vm_area_struct *vma)
        mutex_unlock(&buffer->lock);
 }
 
-struct vm_operations_struct ion_vma_ops = {
+static struct vm_operations_struct ion_vma_ops = {
        .open = ion_vm_open,
        .close = ion_vm_close,
        .fault = ion_vm_fault,
@@ -869,6 +957,8 @@ static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
        }
 
        if (ion_buffer_fault_user_mappings(buffer)) {
+               vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
+                                                       VM_DONTDUMP;
                vma->vm_private_data = buffer;
                vma->vm_ops = &ion_vma_ops;
                ion_vm_open(vma);
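
For cached buffers the mapping is now entirely fault-driven: mmap() installs
no pages up front, and each CPU touch goes through ion_vm_fault() above.
VM_PFNMAP and VM_IO are what vm_insert_pfn() requires; VM_DONTEXPAND and
VM_DONTDUMP keep the mapping fixed and out of core dumps. The per-page flow,
summarized:

        /*
         * 1. CPU touches an unmapped page  ->  ion_vm_fault()
         * 2. ion_buffer_page_dirty() tags the page as CPU-dirty
         * 3. vm_insert_pfn() maps just that page into the vma
         * 4. next DMA: ion_buffer_sync_for_device() flushes only dirty pages
         */
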
@@ -926,8 +1016,6 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
        mutex_unlock(&buffer->lock);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);
-       if (!vaddr)
-               return -ENOMEM;
        return 0;
 }
 
@@ -942,7 +1030,7 @@ static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
        mutex_unlock(&buffer->lock);
 }
 
-struct dma_buf_ops dma_buf_ops = {
+static struct dma_buf_ops dma_buf_ops = {
        .map_dma_buf = ion_map_dma_buf,
        .unmap_dma_buf = ion_unmap_dma_buf,
        .mmap = ion_mmap,
@@ -955,44 +1043,60 @@ struct dma_buf_ops dma_buf_ops = {
        .kunmap = ion_dma_buf_kunmap,
 };
 
-int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
+struct dma_buf *ion_share_dma_buf(struct ion_client *client,
+                                               struct ion_handle *handle)
 {
        struct ion_buffer *buffer;
        struct dma_buf *dmabuf;
        bool valid_handle;
-       int fd;
 
        mutex_lock(&client->lock);
        valid_handle = ion_handle_validate(client, handle);
-       mutex_unlock(&client->lock);
        if (!valid_handle) {
                WARN(1, "%s: invalid handle passed to share.\n", __func__);
-               return -EINVAL;
+               mutex_unlock(&client->lock);
+               return ERR_PTR(-EINVAL);
        }
-
        buffer = handle->buffer;
        ion_buffer_get(buffer);
+       mutex_unlock(&client->lock);
+
        dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
        if (IS_ERR(dmabuf)) {
                ion_buffer_put(buffer);
-               return PTR_ERR(dmabuf);
+               return dmabuf;
        }
+
+       return dmabuf;
+}
+EXPORT_SYMBOL(ion_share_dma_buf);
+
+int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
+{
+       struct dma_buf *dmabuf;
+       int fd;
+
+       dmabuf = ion_share_dma_buf(client, handle);
+       if (IS_ERR(dmabuf))
+               return PTR_ERR(dmabuf);
+
        fd = dma_buf_fd(dmabuf, O_CLOEXEC);
        if (fd < 0)
                dma_buf_put(dmabuf);
 
        return fd;
 }
-EXPORT_SYMBOL(ion_share_dma_buf);
+EXPORT_SYMBOL(ion_share_dma_buf_fd);
 
 struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
 {
        struct dma_buf *dmabuf;
        struct ion_buffer *buffer;
        struct ion_handle *handle;
+       int ret;
 
        dmabuf = dma_buf_get(fd);
-       if (IS_ERR_OR_NULL(dmabuf))
+       if (IS_ERR(dmabuf))
                return ERR_PTR(PTR_ERR(dmabuf));
        /* if this memory came from ion */
 
@@ -1007,16 +1111,26 @@ struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
        mutex_lock(&client->lock);
        /* if a handle exists for this buffer just take a reference to it */
        handle = ion_handle_lookup(client, buffer);
-       if (!IS_ERR_OR_NULL(handle)) {
+       if (!IS_ERR(handle)) {
                ion_handle_get(handle);
+               mutex_unlock(&client->lock);
                goto end;
        }
+       mutex_unlock(&client->lock);
+
        handle = ion_handle_create(client, buffer);
-       if (IS_ERR_OR_NULL(handle))
+       if (IS_ERR(handle))
                goto end;
-       ion_handle_add(client, handle);
-end:
+
+       mutex_lock(&client->lock);
+       ret = ion_handle_add(client, handle);
        mutex_unlock(&client->lock);
+       if (ret) {
+               ion_handle_put(handle);
+               handle = ERR_PTR(ret);
+       }
+
+end:
        dma_buf_put(dmabuf);
        return handle;
 }
@@ -1028,7 +1142,7 @@ static int ion_sync_for_device(struct ion_client *client, int fd)
        struct ion_buffer *buffer;
 
        dmabuf = dma_buf_get(fd);
-       if (IS_ERR_OR_NULL(dmabuf))
+       if (IS_ERR(dmabuf))
                return PTR_ERR(dmabuf);
 
        /* if this memory came from ion */
@@ -1046,103 +1160,120 @@ static int ion_sync_for_device(struct ion_client *client, int fd)
        return 0;
 }
 
+/* fix up the cases where the ioctl direction bits are incorrect */
+static unsigned int ion_ioctl_dir(unsigned int cmd)
+{
+       switch (cmd) {
+       case ION_IOC_SYNC:
+       case ION_IOC_FREE:
+       case ION_IOC_CUSTOM:
+               return _IOC_WRITE;
+       default:
+               return _IOC_DIR(cmd);
+       }
+}
+
 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
        struct ion_client *client = filp->private_data;
+       struct ion_device *dev = client->dev;
+       struct ion_handle *cleanup_handle = NULL;
+       int ret = 0;
+       unsigned int dir;
+
+       union {
+               struct ion_fd_data fd;
+               struct ion_allocation_data allocation;
+               struct ion_handle_data handle;
+               struct ion_custom_data custom;
+       } data;
+
+       dir = ion_ioctl_dir(cmd);
+
+       if (_IOC_SIZE(cmd) > sizeof(data))
+               return -EINVAL;
+
+       if (dir & _IOC_WRITE)
+               if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
+                       return -EFAULT;
 
        switch (cmd) {
        case ION_IOC_ALLOC:
        {
-               struct ion_allocation_data data;
+               struct ion_handle *handle;
 
-               if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
-                       return -EFAULT;
-               data.handle = ion_alloc(client, data.len, data.align,
-                                            data.heap_id_mask, data.flags);
+               handle = ion_alloc(client, data.allocation.len,
+                                               data.allocation.align,
+                                               data.allocation.heap_id_mask,
+                                               data.allocation.flags);
+               if (IS_ERR(handle))
+                       return PTR_ERR(handle);
 
-               if (IS_ERR(data.handle))
-                       return PTR_ERR(data.handle);
+               data.allocation.handle = handle->id;
 
-               if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
-                       ion_free(client, data.handle);
-                       return -EFAULT;
-               }
+               cleanup_handle = handle;
                break;
        }
        case ION_IOC_FREE:
        {
-               struct ion_handle_data data;
-               bool valid;
+               struct ion_handle *handle;
 
-               if (copy_from_user(&data, (void __user *)arg,
-                                  sizeof(struct ion_handle_data)))
-                       return -EFAULT;
-               mutex_lock(&client->lock);
-               valid = ion_handle_validate(client, data.handle);
-               mutex_unlock(&client->lock);
-               if (!valid)
-                       return -EINVAL;
-               ion_free(client, data.handle);
+               handle = ion_handle_get_by_id(client, data.handle.handle);
+               if (IS_ERR(handle))
+                       return PTR_ERR(handle);
+               ion_free(client, handle);
+               ion_handle_put(handle);
                break;
        }
        case ION_IOC_SHARE:
        case ION_IOC_MAP:
        {
-               struct ion_fd_data data;
-
-               if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
-                       return -EFAULT;
-               data.fd = ion_share_dma_buf(client, data.handle);
-               if (copy_to_user((void __user *)arg, &data, sizeof(data)))
-                       return -EFAULT;
-               if (data.fd < 0)
-                       return data.fd;
+               struct ion_handle *handle;
+
+               handle = ion_handle_get_by_id(client, data.handle.handle);
+               if (IS_ERR(handle))
+                       return PTR_ERR(handle);
+               data.fd.fd = ion_share_dma_buf_fd(client, handle);
+               ion_handle_put(handle);
+               if (data.fd.fd < 0)
+                       ret = data.fd.fd;
                break;
        }
        case ION_IOC_IMPORT:
        {
-               struct ion_fd_data data;
-               int ret = 0;
-               if (copy_from_user(&data, (void __user *)arg,
-                                  sizeof(struct ion_fd_data)))
-                       return -EFAULT;
-               data.handle = ion_import_dma_buf(client, data.fd);
-               if (IS_ERR(data.handle)) {
-                       ret = PTR_ERR(data.handle);
-                       data.handle = NULL;
-               }
-               if (copy_to_user((void __user *)arg, &data,
-                                sizeof(struct ion_fd_data)))
-                       return -EFAULT;
-               if (ret < 0)
-                       return ret;
+               struct ion_handle *handle;
+               handle = ion_import_dma_buf(client, data.fd.fd);
+               if (IS_ERR(handle))
+                       ret = PTR_ERR(handle);
+               else
+                       data.handle.handle = handle->id;
                break;
        }
        case ION_IOC_SYNC:
        {
-               struct ion_fd_data data;
-               if (copy_from_user(&data, (void __user *)arg,
-                                  sizeof(struct ion_fd_data)))
-                       return -EFAULT;
-               ion_sync_for_device(client, data.fd);
+               ret = ion_sync_for_device(client, data.fd.fd);
                break;
        }
        case ION_IOC_CUSTOM:
        {
-               struct ion_device *dev = client->dev;
-               struct ion_custom_data data;
-
                if (!dev->custom_ioctl)
                        return -ENOTTY;
-               if (copy_from_user(&data, (void __user *)arg,
-                               sizeof(struct ion_custom_data)))
-                       return -EFAULT;
-               return dev->custom_ioctl(client, data.cmd, data.arg);
+               ret = dev->custom_ioctl(client, data.custom.cmd,
+                                               data.custom.arg);
+               break;
        }
        default:
                return -ENOTTY;
        }
-       return 0;
+
+       if (dir & _IOC_READ) {
+               if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
+                       if (cleanup_handle)
+                               ion_free(client, cleanup_handle);
+                       return -EFAULT;
+               }
+       }
+       return ret;
 }
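
The rework funnels every command through one bounds-checked copy_from_user()
into a stack union and at most one copy_to_user() on the way out, keyed off
_IOC_DIR() (with ion_ioctl_dir() correcting commands whose direction bits are
historically wrong); cleanup_handle unwinds an ION_IOC_ALLOC whose result
cannot be copied back. A hypothetical userspace sketch of the id-based ABI
(ion_fd and the all-heaps mask are assumptions):

        struct ion_allocation_data alloc_data = {
                .len = 4096,
                .align = 4096,
                .heap_id_mask = ~0u,            /* any heap; assumption */
                .flags = ION_FLAG_CACHED,
        };
        struct ion_fd_data fd_data;

        if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc_data) < 0)
                err(1, "ION_IOC_ALLOC");
        fd_data.handle = alloc_data.handle;     /* an idr id, not a pointer */
        if (ioctl(ion_fd, ION_IOC_SHARE, &fd_data) < 0)
                err(1, "ION_IOC_SHARE");
        /* fd_data.fd is now a dma-buf fd, ready for mmap() */
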
 
 static int ion_release(struct inode *inode, struct file *file)
@@ -1161,8 +1292,8 @@ static int ion_open(struct inode *inode, struct file *file)
        struct ion_client *client;
 
        pr_debug("%s: %d\n", __func__, __LINE__);
-       client = ion_client_create(dev, -1, "user");
-       if (IS_ERR_OR_NULL(client))
+       client = ion_client_create(dev, "user");
+       if (IS_ERR(client))
                return PTR_ERR(client);
        file->private_data = client;
 
@@ -1174,10 +1305,11 @@ static const struct file_operations ion_fops = {
        .open           = ion_open,
        .release        = ion_release,
        .unlocked_ioctl = ion_ioctl,
+       .compat_ioctl   = compat_ion_ioctl,
 };
 
 static size_t ion_debug_heap_total(struct ion_client *client,
-                                  enum ion_heap_type type)
+                                  unsigned int id)
 {
        size_t size = 0;
        struct rb_node *n;
@@ -1187,7 +1319,7 @@ static size_t ion_debug_heap_total(struct ion_client *client,
                struct ion_handle *handle = rb_entry(n,
                                                     struct ion_handle,
                                                     node);
-               if (handle->buffer->heap->type == type)
+               if (handle->buffer->heap->id == id)
                        size += handle->buffer->size;
        }
        mutex_unlock(&client->lock);
@@ -1208,17 +1340,17 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused)
        for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
                struct ion_client *client = rb_entry(n, struct ion_client,
                                                     node);
-               size_t size = ion_debug_heap_total(client, heap->type);
+               size_t size = ion_debug_heap_total(client, heap->id);
                if (!size)
                        continue;
                if (client->task) {
                        char task_comm[TASK_COMM_LEN];
 
                        get_task_comm(task_comm, client->task);
-                       seq_printf(s, "%16.s %16u %16u\n", task_comm,
+                       seq_printf(s, "%16.s %16u %16zu\n", task_comm,
                                   client->pid, size);
                } else {
-                       seq_printf(s, "%16.s %16u %16u\n", client->name,
+                       seq_printf(s, "%16.s %16u %16zu\n", client->name,
                                   client->pid, size);
                }
        }
@@ -1229,21 +1361,25 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused)
        for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
                struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
                                                     node);
-               if (buffer->heap->type != heap->type)
+               if (buffer->heap->id != heap->id)
                        continue;
                total_size += buffer->size;
                if (!buffer->handle_count) {
-                       seq_printf(s, "%16.s %16u %16u %d %d\n", buffer->task_comm,
-                                  buffer->pid, buffer->size, buffer->kmap_cnt,
+                       seq_printf(s, "%16.s %16u %16zu %d %d\n",
+                                  buffer->task_comm, buffer->pid,
+                                  buffer->size, buffer->kmap_cnt,
                                   atomic_read(&buffer->ref.refcount));
                        total_orphaned_size += buffer->size;
                }
        }
        mutex_unlock(&dev->buffer_lock);
        seq_printf(s, "----------------------------------------------------\n");
-       seq_printf(s, "%16.s %16u\n", "total orphaned",
+       seq_printf(s, "%16.s %16zu\n", "total orphaned",
                   total_orphaned_size);
-       seq_printf(s, "%16.s %16u\n", "total ", total_size);
+       seq_printf(s, "%16.s %16zu\n", "total ", total_size);
+       if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+               seq_printf(s, "%16.s %16zu\n", "deferred free",
+                               heap->free_list_size);
        seq_printf(s, "----------------------------------------------------\n");
 
        if (heap->debug_show)
@@ -1264,6 +1400,44 @@ static const struct file_operations debug_heap_fops = {
        .release = single_release,
 };
 
+#ifdef DEBUG_HEAP_SHRINKER
+static int debug_shrink_set(void *data, u64 val)
+{
+        struct ion_heap *heap = data;
+        struct shrink_control sc;
+        int objs;
+
+        sc.gfp_mask = -1;
+        sc.nr_to_scan = 0;
+
+        if (!val)
+                return 0;
+
+        objs = heap->shrinker.shrink(&heap->shrinker, &sc);
+        sc.nr_to_scan = objs;
+
+        heap->shrinker.shrink(&heap->shrinker, &sc);
+        return 0;
+}
+
+static int debug_shrink_get(void *data, u64 *val)
+{
+        struct ion_heap *heap = data;
+        struct shrink_control sc;
+        int objs;
+
+        sc.gfp_mask = -1;
+        sc.nr_to_scan = 0;
+
+        objs = heap->shrinker.shrink(&heap->shrinker, &sc);
+        *val = objs;
+        return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
+                        debug_shrink_set, "%llu\n");
+#endif
+
 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
 {
        if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
@@ -1271,6 +1445,9 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
                pr_err("%s: can not add heap with invalid ops struct.\n",
                       __func__);
 
+       if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+               ion_heap_init_deferred_free(heap);
+
        heap->dev = dev;
        down_write(&dev->lock);
        /* use negative heap->id to reverse the priority -- when traversing
@@ -1279,6 +1456,15 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
        plist_add(&heap->node, &dev->heaps);
        debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
                            &debug_heap_fops);
+#ifdef DEBUG_HEAP_SHRINKER
+       if (heap->shrinker.shrink) {
+               char debug_name[64];
+
+               snprintf(debug_name, 64, "%s_shrink", heap->name);
+               debugfs_create_file(debug_name, 0644, dev->debug_root, heap,
+                                   &debug_shrink_fops);
+       }
+#endif
        up_write(&dev->lock);
 }
 
@@ -1305,7 +1491,7 @@ struct ion_device *ion_device_create(long (*custom_ioctl)
        }
 
        idev->debug_root = debugfs_create_dir("ion", NULL);
-       if (IS_ERR_OR_NULL(idev->debug_root))
+       if (!idev->debug_root)
                pr_err("ion: failed to create debug files.\n");
 
        idev->custom_ioctl = custom_ioctl;
@@ -1348,11 +1534,11 @@ void __init ion_reserve(struct ion_platform_data *data)
                        int ret = memblock_reserve(data->heaps[i].base,
                                               data->heaps[i].size);
                        if (ret)
-                               pr_err("memblock reserve of %x@%lx failed\n",
+                               pr_err("memblock reserve of %zx@%lx failed\n",
                                       data->heaps[i].size,
                                       data->heaps[i].base);
                }
-               pr_info("%s: %s reserved base %lx size %d\n", __func__,
+               pr_info("%s: %s reserved base %lx size %zu\n", __func__,
                        data->heaps[i].name,
                        data->heaps[i].base,
                        data->heaps[i].size);