/*
 * drivers/gpu/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/ion.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>

#include "ion_priv.h"
/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @lock:		lock protecting the buffers & heaps trees
 * @heaps:		rb tree of all the heaps in the system
 * @custom_ioctl:	optional device-specific ioctl hook
 * @clients:		rb tree of all the clients (kernel and userspace)
 * @debug_root:		debugfs directory for the device
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex lock;
	struct rb_root heaps;
	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
			      unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
};
/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @lock:		lock protecting the tree of handles
 * @heap_mask:		mask of all supported heaps
 * @name:		used for debugging
 * @task:		used for debugging
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the handles tree
 * as well as the handles themselves, and should be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct mutex lock;
	unsigned int heap_mask;
	const char *name;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};
/**
 * struct ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @dmap_cnt:		count of times this client has mapped for dma
 *
 * Modifications to node, map_cnt or mapping should be protected by the
 * lock in the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	unsigned int dmap_cnt;
};
/* this function should only be called while dev->lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}
static int ion_buffer_alloc_dirty(struct ion_buffer *buffer);

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
					    struct ion_device *dev,
					    unsigned long len,
					    unsigned long align,
					    unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);
	if (ret) {
		kfree(buffer);
		return ERR_PTR(ret);
	}

	buffer->dev = dev;
	buffer->size = len;
	buffer->flags = flags;

	table = heap->ops->map_dma(heap, buffer);
	if (IS_ERR_OR_NULL(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(PTR_ERR(table));
	}
	buffer->sg_table = table;
	if (buffer->flags & ION_FLAG_CACHED)
		for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
			    i) {
			if (sg_dma_len(sg) == PAGE_SIZE)
				continue;
			pr_err("%s: cached mappings must have pagewise sg_lists\n",
			       __func__);
			heap->ops->unmap_dma(heap, buffer);
			heap->ops->free(buffer);
			kfree(buffer);
			return ERR_PTR(-EINVAL);
		}

	ret = ion_buffer_alloc_dirty(buffer);
	if (ret) {
		heap->ops->unmap_dma(heap, buffer);
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_PTR(ret);
	}

	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/*
	 * This will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific
	 * device isn't really taking ownership here.  However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg.  The implicit contract here is that
	 * memory coming from the heaps is ready for dma, ie if it has a
	 * cached mapping that mapping has been invalidated.
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
		sg_dma_address(sg) = sg_phys(sg);
	ion_buffer_add(dev, buffer);
	return buffer;
}
static void ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_device *dev = buffer->dev;

	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);

	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	mutex_lock(&dev->lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->lock);
	kfree(buffer);
}

static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, ion_buffer_destroy);
}
static struct ion_handle *ion_handle_create(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&client->lock);

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);
	mutex_unlock(&client->lock);

	ion_buffer_put(buffer);
	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	return kref_put(&handle->ref, ion_handle_destroy);
}
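
/*
 * Find the handle this client already holds for @buffer, if any, by walking
 * the client's handle tree.  Callers are expected to hold client->lock.
 */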
static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n;

	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer == buffer)
			return handle;
	}
	return NULL;
}

static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
							  node);
		if (handle < handle_node)
			n = n->rb_left;
		else if (handle > handle_node)
			n = n->rb_right;
		else
			return true;
	}
	return false;
}

static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle < entry)
			p = &(*p)->rb_left;
		else if (handle > entry)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);
}
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_mask,
			     unsigned int flags)
{
	struct rb_node *n;
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;

	pr_debug("%s: len %zu align %zu heap_mask %u flags %x\n", __func__,
		 len, align, heap_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller, allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried.
	 */
	if (WARN_ON(!len))
		return ERR_PTR(-EINVAL);

	len = PAGE_ALIGN(len);

	mutex_lock(&dev->lock);
	for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
		struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
		/* if the client doesn't support this heap type */
		if (!((1 << heap->type) & client->heap_mask))
			continue;
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR_OR_NULL(buffer))
			break;
	}
	mutex_unlock(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_PTR(PTR_ERR(buffer));

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (!IS_ERR(handle)) {
		mutex_lock(&client->lock);
		ion_handle_add(client, handle);
		mutex_unlock(&client->lock);
	}

	return handle;
}
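
/*
 * A minimal sketch of in-kernel use of the allocator above (illustrative
 * only; "idev", "my_heap_id" and the client name are placeholders, and
 * error handling is omitted).  Note that the client's heap_mask is a mask
 * of heap *types* while ion_alloc() takes a mask of heap *ids*:
 *
 *	struct ion_client *client;
 *	struct ion_handle *handle;
 *	void *vaddr;
 *
 *	client = ion_client_create(idev, 1 << ION_HEAP_TYPE_SYSTEM, "example");
 *	handle = ion_alloc(client, SZ_64K, PAGE_SIZE, 1 << my_heap_id,
 *			   ION_FLAG_CACHED);
 *	vaddr = ion_map_kernel(client, handle);
 *	...
 *	ion_unmap_kernel(client, handle);
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */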
void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		return;
	}
	ion_handle_put(handle);
}
int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
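
/*
 * Kernel mappings are reference counted at two levels: kmap_cnt on the
 * buffer tracks the total number of kernel mappings, while kmap_cnt on each
 * handle tracks that client's share of them.  The heap's map_kernel and
 * unmap_kernel ops are only invoked when the buffer-level count moves
 * between zero and one; buffer->lock is held around these helpers.
 */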
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR_OR_NULL(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAPS] = {0};
	const char *names[ION_NUM_HEAPS] = {0};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		enum ion_heap_type type = handle->buffer->heap->type;

		if (!names[type])
			names[type] = handle->buffer->heap->name;
		sizes[type] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAPS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
struct ion_client *ion_client_create(struct ion_device *dev,
				     unsigned int heap_mask,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	char debug_name[64];
	pid_t pid;

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client) {
		if (task)
			put_task_struct(current->group_leader);
		return ERR_PTR(-ENOMEM);
	}

	client->dev = dev;
	client->handles = RB_ROOT;
	mutex_init(&client->lock);
	client->name = name;
	client->heap_mask = heap_mask;
	client->task = task;
	client->pid = pid;

	mutex_lock(&dev->lock);
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	snprintf(debug_name, 64, "%u", client->pid);
	client->debug_root = debugfs_create_file(debug_name, 0664,
						 dev->debug_root, client,
						 &debug_client_fops);
	mutex_unlock(&dev->lock);

	return client;
}

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}
	mutex_lock(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	mutex_unlock(&dev->lock);

	kfree(client);
}
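
/*
 * Return the scatter/gather table backing @handle's buffer.  The table is
 * owned by the buffer and set up once at allocation time; callers must not
 * free it, and it stays valid for the lifetime of the buffer.
 */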
struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);
static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	if (buffer->flags & ION_FLAG_CACHED)
		ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

static int ion_buffer_alloc_dirty(struct ion_buffer *buffer)
{
	unsigned long pages = buffer->sg_table->nents;
	unsigned long length = (pages + BITS_PER_LONG - 1) / BITS_PER_LONG;

	buffer->dirty = kzalloc(length * sizeof(unsigned long), GFP_KERNEL);
	if (!buffer->dirty)
		return -ENOMEM;
	return 0;
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};
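
/*
 * Cached userspace mappings are kept coherent lazily: pages are inserted on
 * demand by ion_vm_fault() and marked in the per-buffer dirty bitmap
 * allocated by ion_buffer_alloc_dirty().  When the buffer is next mapped for
 * DMA, ion_buffer_sync_for_device() cleans the dirty pages and zaps every
 * user mapping so the fault/sync cycle can start over.
 */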
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;
	struct ion_vma_list *vma_list;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");
	mutex_lock(&buffer->lock);
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (!test_bit(i, buffer->dirty))
			continue;
		dma_sync_sg_for_device(dev, sg, 1, dir);
		clear_bit(i, buffer->dirty);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct scatterlist *sg;
	int i;

	mutex_lock(&buffer->lock);
	set_bit(vmf->pgoff, buffer->dirty);

	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
		if (i != vmf->pgoff)
			continue;
		dma_sync_sg_for_cpu(NULL, sg, 1, DMA_BIDIRECTIONAL);
		vm_insert_page(vma, (unsigned long)vmf->virtual_address,
			       sg_page(sg));
		break;
	}
	mutex_unlock(&buffer->lock);
	return VM_FAULT_NOPAGE;
}
static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
		       __func__);
		return -EINVAL;
	}

	if (buffer->flags & ION_FLAG_CACHED) {
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}
static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	if (IS_ERR_OR_NULL(vaddr))
		return PTR_ERR(vaddr);
	return 0;
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};
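
/*
 * Wrap @handle's buffer in a dma-buf and return a file descriptor for it.
 * The dma-buf holds its own reference on the buffer (dropped in
 * ion_dma_buf_release()), so the fd stays usable even after the client
 * frees the handle.  Returns a negative errno on failure.
 */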
int ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;
	int fd;

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	mutex_unlock(&client->lock);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		return -EINVAL;
	}

	buffer = handle->buffer;
	ion_buffer_get(buffer);
	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return PTR_ERR(dmabuf);
	}
	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		ion_buffer_put(buffer);

	return fd;
}
struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(dmabuf))
		return ERR_PTR(PTR_ERR(dmabuf));

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR_OR_NULL(handle)) {
		ion_handle_get(handle);
		goto end;
	}
	handle = ion_handle_create(client, buffer);
	if (IS_ERR_OR_NULL(handle))
		goto end;
	ion_handle_add(client, handle);
end:
	mutex_unlock(&client->lock);
	dma_buf_put(dmabuf);
	return handle;
}
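
/*
 * The misc device ioctls below mirror the kernel API for userspace:
 * ION_IOC_ALLOC/ION_IOC_FREE manage handles, ION_IOC_SHARE/ION_IOC_MAP
 * export a handle as a dma-buf fd, ION_IOC_IMPORT wraps an existing dma-buf
 * fd in a handle, and ION_IOC_CUSTOM forwards to the per-device
 * custom_ioctl hook.
 */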
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_allocation_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.handle = ion_alloc(client, data.len, data.align,
					data.heap_mask, data.flags);

		if (IS_ERR(data.handle))
			return PTR_ERR(data.handle);

		if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
			ion_free(client, data.handle);
			return -EFAULT;
		}
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle_data data;
		bool valid;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_handle_data)))
			return -EFAULT;
		mutex_lock(&client->lock);
		valid = ion_handle_validate(client, data.handle);
		mutex_unlock(&client->lock);
		if (!valid)
			return -EINVAL;
		ion_free(client, data.handle);
		break;
	}
	case ION_IOC_SHARE:
	case ION_IOC_MAP:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
			return -EFAULT;
		data.fd = ion_share_dma_buf(client, data.handle);
		if (copy_to_user((void __user *)arg, &data, sizeof(data)))
			return -EFAULT;
		if (data.fd < 0)
			return data.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_fd_data data;

		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_fd_data)))
			return -EFAULT;
		data.handle = ion_import_dma_buf(client, data.fd);
		if (IS_ERR(data.handle))
			data.handle = NULL;
		if (copy_to_user((void __user *)arg, &data,
				 sizeof(struct ion_fd_data)))
			return -EFAULT;
		break;
	}
	case ION_IOC_CUSTOM:
	{
		struct ion_device *dev = client->dev;
		struct ion_custom_data data;

		if (!dev->custom_ioctl)
			return -ENOTTY;
		if (copy_from_user(&data, (void __user *)arg,
				   sizeof(struct ion_custom_data)))
			return -EFAULT;
		return dev->custom_ioctl(client, data.cmd, data.arg);
	}
	default:
		return -ENOTTY;
	}
	return 0;
}
static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;

	pr_debug("%s: %d\n", __func__, __LINE__);
	client = ion_client_create(dev, -1, "user");
	if (IS_ERR_OR_NULL(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner = THIS_MODULE,
	.open = ion_open,
	.release = ion_release,
	.unlocked_ioctl = ion_ioctl,
};
static size_t ion_debug_heap_total(struct ion_client *client,
				   enum ion_heap_type type)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		if (handle->buffer->heap->type == type)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->type);
		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct rb_node **p = &dev->heaps.rb_node;
	struct rb_node *parent = NULL;
	struct ion_heap *entry;

	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	heap->dev = dev;
	mutex_lock(&dev->lock);
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_heap, node);

		if (heap->id < entry->id) {
			p = &(*p)->rb_left;
		} else if (heap->id > entry->id) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: can not insert multiple heaps with id %d\n",
			       __func__, heap->id);
			goto end;
		}
	}

	rb_link_node(&heap->node, parent, p);
	rb_insert_color(&heap->node, &dev->heaps);
	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
			    &debug_heap_fops);
end:
	mutex_unlock(&dev->lock);
}
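
/*
 * A platform ion driver is expected to create one ion_device and register
 * its heaps against it, roughly as sketched below (illustrative only;
 * ion_heap_create() lives in the heap code, not in this file, and "pdata"
 * and "my_custom_ioctl" are placeholders):
 *
 *	idev = ion_device_create(my_custom_ioctl);
 *	for (i = 0; i < pdata->nr; i++) {
 *		struct ion_heap *heap = ion_heap_create(&pdata->heaps[i]);
 *
 *		ion_device_add_heap(idev, heap);
 *	}
 */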
struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (IS_ERR_OR_NULL(idev->debug_root))
		pr_err("ion: failed to create debug files.\n");

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->lock);
	idev->heaps = RB_ROOT;
	idev->clients = RB_ROOT;
	return idev;
}
void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}

void __init ion_reserve(struct ion_platform_data *data)
{
	int i, ret;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;
		ret = memblock_reserve(data->heaps[i].base,
				       data->heaps[i].size);
		if (ret)
			pr_err("memblock reserve of %zx@%lx failed\n",
			       data->heaps[i].size,
			       data->heaps[i].base);
	}
}
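
/*
 * ion_reserve() is meant to be called from early boot code, before the page
 * allocator has claimed the memory, e.g. (illustrative only; the heap array,
 * addresses and sizes are placeholders):
 *
 *	static struct ion_platform_heap my_heaps[] = {
 *		{
 *			.type = ION_HEAP_TYPE_CARVEOUT,
 *			.id   = 1,
 *			.name = "carveout",
 *			.base = 0x80000000,
 *			.size = SZ_16M,
 *		},
 *	};
 *	static struct ion_platform_data my_ion_pdata = {
 *		.nr    = ARRAY_SIZE(my_heaps),
 *		.heaps = my_heaps,
 *	};
 *
 *	void __init my_board_reserve(void)
 *	{
 *		ion_reserve(&my_ion_pdata);
 *	}
 */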