2 * drivers/staging/android/ion/ion.c
4 * Copyright (C) 2011 Google, Inc.
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
17 #include <linux/device.h>
18 #include <linux/file.h>
20 #include <linux/anon_inodes.h>
21 #include <linux/list.h>
22 #include <linux/memblock.h>
23 #include <linux/miscdevice.h>
24 #include <linux/export.h>
26 #include <linux/mm_types.h>
27 #include <linux/rbtree.h>
28 #include <linux/sched.h>
29 #include <linux/slab.h>
30 #include <linux/seq_file.h>
31 #include <linux/uaccess.h>
32 #include <linux/debugfs.h>
39 * struct ion_device - the metadata of the ion device node
40 * @dev: the actual misc device
41 * @buffers: an rb tree of all the existing buffers
42 * @lock: lock protecting the buffers & heaps trees
43 * @heaps: list of all the heaps in the system
44 * @user_clients: list of all the clients created from userspace
47 struct miscdevice dev;
48 struct rb_root buffers;
/* device-specific ioctl hook, invoked for ION_IOC_CUSTOM (may be NULL) */
51 long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
/* rb tree of userspace clients, ordered by their task_struct pointer */
53 struct rb_root user_clients;
/* rb tree of in-kernel clients, ordered by client pointer value */
54 struct rb_root kernel_clients;
/* root of this device's debugfs directory ("ion") */
55 struct dentry *debug_root;
59 * struct ion_client - a process/hw block local address space
60 * @ref: for reference counting the client
61 * @node: node in the tree of all clients
62 * @dev: backpointer to ion device
63 * @handles: an rb tree of all the handles in this client
64 * @lock: lock protecting the tree of handles
65 * @heap_mask: mask of all supported heaps
66 * @name: used for debugging
67 * @task: used for debugging
69 * A client represents a list of buffers this client may access.
70 * The mutex stored here is used to protect both handles tree
71 * as well as the handles themselves, and should be held while modifying either.
76 struct ion_device *dev;
77 struct rb_root handles;
/* bit i set => client may allocate from heaps of type i (see ion_alloc) */
79 unsigned int heap_mask;
/* group leader of the creating process; NOTE(review): appears unset for
 * kernel threads (PF_KTHREAD path in ion_client_create) — confirm */
81 struct task_struct *task;
/* per-client debugfs file, named after the client's pid */
83 struct dentry *debug_root;
87 * ion_handle - a client local reference to a buffer
88 * @ref: reference count
89 * @client: back pointer to the client the buffer resides in
90 * @buffer: pointer to the buffer
91 * @node: node in the client's handle rbtree
92 * @kmap_cnt: count of times this client has mapped to kernel
93 * @dmap_cnt: count of times this client has mapped for dma
94 * @usermap_cnt: count of times this client has mapped for userspace
96 * Modifications to node, map_cnt or mapping should be protected by the
97 * lock in the client. Other fields are never changed after initialization.
101 struct ion_client *client;
102 struct ion_buffer *buffer;
104 unsigned int kmap_cnt;
105 unsigned int dmap_cnt;
106 unsigned int usermap_cnt;
/*
 * Insert @buffer into dev->buffers, an rbtree ordered by raw pointer value.
 * A duplicate pointer only logs an error (the tree is never corrupted).
 */
109 /* this function should only be called while dev->lock is held */
110 static void ion_buffer_add(struct ion_device *dev,
111 struct ion_buffer *buffer)
113 struct rb_node **p = &dev->buffers.rb_node;
114 struct rb_node *parent = NULL;
115 struct ion_buffer *entry;
119 entry = rb_entry(parent, struct ion_buffer, node);
/* ordering key is the buffer pointer itself, not any buffer field */
121 if (buffer < entry) {
123 } else if (buffer > entry) {
126 pr_err("%s: buffer already found.", __func__);
131 rb_link_node(&buffer->node, parent, p);
132 rb_insert_color(&buffer->node, &dev->buffers);
/*
 * Allocate an ion_buffer from @heap and register it with @dev.
 * The returned buffer starts with a refcount of 1 (kref_init); callers that
 * wrap it in a handle must drop that initial reference (see ion_alloc).
 * Returns ERR_PTR(-ENOMEM) if the metadata allocation fails.
 */
135 /* this function should only be called while dev->lock is held */
136 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
137 struct ion_device *dev,
142 struct ion_buffer *buffer;
145 buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
147 return ERR_PTR(-ENOMEM);
150 kref_init(&buffer->ref);
/* delegate the actual memory allocation to the heap implementation */
152 ret = heap->ops->allocate(heap, buffer, len, align, flags);
159 mutex_init(&buffer->lock);
160 ion_buffer_add(dev, buffer);
/*
 * kref release callback for ion_buffer: free the backing memory via the
 * heap, then unlink the buffer from the device tree.
 * NOTE(review): the heap free happens before the buffer is erased from
 * dev->buffers under dev->lock — confirm no lookup can race in that window.
 */
164 static void ion_buffer_destroy(struct kref *kref)
166 struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
167 struct ion_device *dev = buffer->dev;
169 buffer->heap->ops->free(buffer);
170 mutex_lock(&dev->lock);
171 rb_erase(&buffer->node, &dev->buffers);
172 mutex_unlock(&dev->lock);
/* Take an additional reference on @buffer. */
176 static void ion_buffer_get(struct ion_buffer *buffer)
178 kref_get(&buffer->ref);
/* Drop a reference on @buffer; returns nonzero if this was the last one. */
181 static int ion_buffer_put(struct ion_buffer *buffer)
183 return kref_put(&buffer->ref, ion_buffer_destroy);
/*
 * Create a new handle on @client referencing @buffer.  Takes its own
 * reference on the buffer; the handle's kref starts at 1.  The handle is
 * NOT yet linked into the client tree (see ion_handle_add).
 * Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
186 static struct ion_handle *ion_handle_create(struct ion_client *client,
187 struct ion_buffer *buffer)
189 struct ion_handle *handle;
191 handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
193 return ERR_PTR(-ENOMEM);
194 kref_init(&handle->ref);
195 RB_CLEAR_NODE(&handle->node);
196 handle->client = client;
197 ion_buffer_get(buffer);
198 handle->buffer = buffer;
/*
 * kref release callback for ion_handle: drop the buffer reference and
 * unlink the handle from the owning client's rbtree (if it was linked).
 */
203 static void ion_handle_destroy(struct kref *kref)
205 struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
206 /* XXX Can a handle be destroyed while it's map count is non-zero?:
207 if (handle->map_cnt) unmap
209 ion_buffer_put(handle->buffer);
210 mutex_lock(&handle->client->lock);
/* RB_CLEAR_NODE in ion_handle_create makes this check safe for unlinked handles */
211 if (!RB_EMPTY_NODE(&handle->node))
212 rb_erase(&handle->node, &handle->client->handles);
213 mutex_unlock(&handle->client->lock);
/* Accessor: return the buffer a handle refers to (no reference taken). */
217 struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
219 return handle->buffer;
/* Take an additional reference on @handle. */
222 static void ion_handle_get(struct ion_handle *handle)
224 kref_get(&handle->ref);
/* Drop a reference on @handle; returns nonzero if this was the last one. */
227 static int ion_handle_put(struct ion_handle *handle)
229 return kref_put(&handle->ref, ion_handle_destroy);
/*
 * Linear scan of @client's handle tree for a handle referencing @buffer.
 * Callers (ion_import) hold client->lock around this walk.
 */
232 static struct ion_handle *ion_handle_lookup(struct ion_client *client,
233 struct ion_buffer *buffer)
237 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
238 struct ion_handle *handle = rb_entry(n, struct ion_handle,
240 if (handle->buffer == buffer)
/*
 * Check that @handle actually belongs to @client by searching the client's
 * rbtree (ordered by handle pointer value).  Guards ioctl paths against
 * userspace passing a stale or foreign handle.  Caller holds client->lock.
 */
246 static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
248 struct rb_node *n = client->handles.rb_node;
251 struct ion_handle *handle_node = rb_entry(n, struct ion_handle,
253 if (handle < handle_node)
255 else if (handle > handle_node)
/*
 * Insert @handle into @client's rbtree, ordered by handle pointer value.
 * A duplicate pointer triggers a WARN but does not corrupt the tree.
 * Caller holds client->lock.
 */
263 static void ion_handle_add(struct ion_client *client, struct ion_handle *handle)
265 struct rb_node **p = &client->handles.rb_node;
266 struct rb_node *parent = NULL;
267 struct ion_handle *entry;
271 entry = rb_entry(parent, struct ion_handle, node);
275 else if (handle > entry)
278 WARN(1, "%s: buffer already found.", __func__);
281 rb_link_node(&handle->node, parent, p);
282 rb_insert_color(&handle->node, &client->handles);
/*
 * ion_alloc - allocate a buffer and return a new handle to it.
 * @flags does double duty here: its bits are matched against (1 << heap->id)
 * to select acceptable heaps, and the same value is forwarded to the heap's
 * allocate() as allocation flags.  Returns an ERR_PTR on failure.
 */
285 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
286 size_t align, unsigned int flags)
289 struct ion_handle *handle;
290 struct ion_device *dev = client->dev;
291 struct ion_buffer *buffer = NULL;
294 * traverse the list of heaps available in this system in priority
295 * order. If the heap type is supported by the client, and matches the
296 * request of the caller allocate from it. Repeat until allocate has
297 * succeeded or all heaps have been tried
299 mutex_lock(&dev->lock);
300 for (n = rb_first(&dev->heaps); n != NULL; n = rb_next(n)) {
301 struct ion_heap *heap = rb_entry(n, struct ion_heap, node);
302 /* if the client doesn't support this heap type */
303 if (!((1 << heap->type) & client->heap_mask))
305 /* if the caller didn't specify this heap type */
306 if (!((1 << heap->id) & flags))
308 buffer = ion_buffer_create(heap, dev, len, align, flags);
309 if (!IS_ERR_OR_NULL(buffer))
312 mutex_unlock(&dev->lock);
314 if (IS_ERR_OR_NULL(buffer))
315 return ERR_PTR(PTR_ERR(buffer));
317 handle = ion_handle_create(client, buffer);
319 if (IS_ERR_OR_NULL(handle))
323 * ion_buffer_create will create a buffer with a ref_cnt of 1,
324 * and ion_handle_create will take a second reference, drop one here
326 ion_buffer_put(buffer);
328 mutex_lock(&client->lock);
329 ion_handle_add(client, handle);
330 mutex_unlock(&client->lock);
/* error path: release the buffer created above (handle creation failed) */
334 ion_buffer_put(buffer);
/*
 * ion_free - drop the client's reference to @handle.
 * Validates the handle under client->lock first; an invalid handle only
 * WARNs and is ignored.  The final ion_handle_put tears the handle down.
 */
338 void ion_free(struct ion_client *client, struct ion_handle *handle)
342 BUG_ON(client != handle->client);
344 mutex_lock(&client->lock);
345 valid_handle = ion_handle_validate(client, handle);
346 mutex_unlock(&client->lock);
349 WARN("%s: invalid handle passed to free.\n", __func__);
352 ion_handle_put(handle);
355 static void ion_client_get(struct ion_client *client);
356 static int ion_client_put(struct ion_client *client);
/*
 * Bump the per-handle and per-buffer map counts.
 * NOTE(review): the increment statements and return paths are elided in this
 * excerpt; from callers it returns true when the buffer needs an actual
 * (first) mapping — confirm against the full source.
 */
358 static bool _ion_map(int *buffer_cnt, int *handle_cnt)
/* a handle can never have mapped more times than its buffer */
362 BUG_ON(*handle_cnt != 0 && *buffer_cnt == 0);
368 if (*handle_cnt == 0)
/*
 * Drop the per-handle and per-buffer map counts.
 * NOTE(review): decrements and returns are elided in this excerpt; from
 * callers it returns true when the buffer's last mapping went away and the
 * real unmap should be performed — confirm against the full source.
 */
374 static bool _ion_unmap(int *buffer_cnt, int *handle_cnt)
376 BUG_ON(*handle_cnt == 0);
378 if (*handle_cnt != 0)
380 BUG_ON(*buffer_cnt == 0);
382 if (*buffer_cnt == 0)
/*
 * ion_phys - query the physical address and length of @handle's buffer.
 * Fails with -EINVAL for an invalid handle; errors if the heap does not
 * implement phys().  The heap callback itself runs after client->lock is
 * released.
 */
387 int ion_phys(struct ion_client *client, struct ion_handle *handle,
388 ion_phys_addr_t *addr, size_t *len)
390 struct ion_buffer *buffer;
393 mutex_lock(&client->lock);
394 if (!ion_handle_validate(client, handle)) {
395 mutex_unlock(&client->lock);
399 buffer = handle->buffer;
401 if (!buffer->heap->ops->phys) {
402 pr_err("%s: ion_phys is not implemented by this heap.\n",
404 mutex_unlock(&client->lock);
407 mutex_unlock(&client->lock);
408 ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
/*
 * ion_map_kernel - map @handle's buffer into the kernel address space.
 * The first mapper of a buffer calls the heap's map_kernel() and caches the
 * result in buffer->vaddr; later mappers just reuse the cached address
 * (map counts are tracked by _ion_map).  Returns ERR_PTR on invalid handle
 * or when the heap has no map_kernel op.
 */
412 void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
414 struct ion_buffer *buffer;
417 mutex_lock(&client->lock);
418 if (!ion_handle_validate(client, handle)) {
419 pr_err("%s: invalid handle passed to map_kernel.\n",
421 mutex_unlock(&client->lock);
422 return ERR_PTR(-EINVAL);
425 buffer = handle->buffer;
426 mutex_lock(&buffer->lock);
428 if (!handle->buffer->heap->ops->map_kernel) {
429 pr_err("%s: map_kernel is not implemented by this heap.\n",
431 mutex_unlock(&buffer->lock);
432 mutex_unlock(&client->lock);
433 return ERR_PTR(-ENODEV);
/* first mapping of this buffer: do the real map and cache the address */
436 if (_ion_map(&buffer->kmap_cnt, &handle->kmap_cnt)) {
437 vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
438 if (IS_ERR_OR_NULL(vaddr))
439 _ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt);
440 buffer->vaddr = vaddr;
442 vaddr = buffer->vaddr;
444 mutex_unlock(&buffer->lock);
445 mutex_unlock(&client->lock);
/*
 * ion_map_dma - obtain an sg_table describing @handle's buffer for DMA.
 * Mirrors ion_map_kernel: the first mapper calls the heap's map_dma() and
 * caches the table in buffer->sg_table; later mappers reuse it.
 */
449 struct sg_table *ion_map_dma(struct ion_client *client,
450 struct ion_handle *handle)
452 struct ion_buffer *buffer;
453 struct sg_table *table;
455 mutex_lock(&client->lock);
456 if (!ion_handle_validate(client, handle)) {
457 pr_err("%s: invalid handle passed to map_dma.\n",
459 mutex_unlock(&client->lock);
460 return ERR_PTR(-EINVAL);
462 buffer = handle->buffer;
463 mutex_lock(&buffer->lock);
465 if (!handle->buffer->heap->ops->map_dma) {
/* NOTE(review): message says "map_kernel" but this is the map_dma path —
 * looks like a copy/paste slip; should read map_dma */
466 pr_err("%s: map_kernel is not implemented by this heap.\n",
468 mutex_unlock(&buffer->lock);
469 mutex_unlock(&client->lock);
470 return ERR_PTR(-ENODEV);
/* first DMA mapping of this buffer: build and cache the sg_table */
472 if (_ion_map(&buffer->dmap_cnt, &handle->dmap_cnt)) {
473 table = buffer->heap->ops->map_dma(buffer->heap, buffer);
474 if (IS_ERR_OR_NULL(table))
475 _ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt);
476 buffer->sg_table = table;
478 table = buffer->sg_table;
480 mutex_unlock(&buffer->lock);
481 mutex_unlock(&client->lock);
/*
 * ion_unmap_kernel - drop one kernel mapping of @handle's buffer.
 * Only when the buffer's last kernel mapping goes away is the heap's
 * unmap_kernel() called and the cached vaddr cleared.
 * NOTE(review): unlike the map path, @handle is not validated here.
 */
485 void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
487 struct ion_buffer *buffer;
489 mutex_lock(&client->lock);
490 buffer = handle->buffer;
491 mutex_lock(&buffer->lock);
492 if (_ion_unmap(&buffer->kmap_cnt, &handle->kmap_cnt)) {
493 buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
494 buffer->vaddr = NULL;
496 mutex_unlock(&buffer->lock);
497 mutex_unlock(&client->lock);
/*
 * ion_unmap_dma - drop one DMA mapping of @handle's buffer.
 * Calls the heap's unmap_dma() and clears the cached sg_table only when the
 * buffer's last DMA mapping is released.
 */
500 void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle)
502 struct ion_buffer *buffer;
504 mutex_lock(&client->lock);
505 buffer = handle->buffer;
506 mutex_lock(&buffer->lock);
507 if (_ion_unmap(&buffer->dmap_cnt, &handle->dmap_cnt)) {
508 buffer->heap->ops->unmap_dma(buffer->heap, buffer);
509 buffer->sg_table = NULL;
511 mutex_unlock(&buffer->lock);
512 mutex_unlock(&client->lock);
/*
 * ion_share - expose the raw buffer behind @handle so it can be imported
 * into another client.  Validates the handle; returns ERR_PTR(-EINVAL)
 * otherwise.  No reference is taken (see comment below) — the caller owns
 * keeping the buffer alive until the import completes.
 */
516 struct ion_buffer *ion_share(struct ion_client *client,
517 struct ion_handle *handle)
521 mutex_lock(&client->lock);
522 valid_handle = ion_handle_validate(client, handle);
523 mutex_unlock(&client->lock);
525 WARN("%s: invalid handle passed to share.\n", __func__);
526 return ERR_PTR(-EINVAL);
529 /* do not take an extra reference here, the burden is on the caller
530 * to make sure the buffer doesn't go away while it's passing it
531 * to another client -- ion_free should not be called on this handle
532 * until the buffer has been imported into the other client
534 return handle->buffer;
/*
 * ion_import - obtain a handle on @buffer for @client.
 * If the client already has a handle for this buffer the existing handle's
 * refcount is bumped; otherwise a fresh handle is created and linked into
 * the client's tree.  All under client->lock.
 */
537 struct ion_handle *ion_import(struct ion_client *client,
538 struct ion_buffer *buffer)
540 struct ion_handle *handle = NULL;
542 mutex_lock(&client->lock);
543 /* if a handle exists for this buffer just take a reference to it */
544 handle = ion_handle_lookup(client, buffer);
545 if (!IS_ERR_OR_NULL(handle)) {
546 ion_handle_get(handle);
549 handle = ion_handle_create(client, buffer);
550 if (IS_ERR_OR_NULL(handle))
552 ion_handle_add(client, handle);
554 mutex_unlock(&client->lock);
558 static const struct file_operations ion_share_fops;
/*
 * ion_import_fd - import a buffer from a shared-ion file descriptor.
 * Looks the fd up via fget(), verifies the file really is an ion share file
 * (f_op must match ion_share_fops) so private_data can safely be treated as
 * an ion_buffer, then imports it.  The fput() on exit is elided in this
 * excerpt.
 */
560 struct ion_handle *ion_import_fd(struct ion_client *client, int fd)
562 struct file *file = fget(fd);
563 struct ion_handle *handle;
566 pr_err("%s: imported fd not found in file table.\n", __func__);
567 return ERR_PTR(-EINVAL);
569 if (file->f_op != &ion_share_fops) {
570 pr_err("%s: imported file is not a shared ion file.\n",
572 handle = ERR_PTR(-EINVAL);
575 handle = ion_import(client, file->private_data);
/*
 * debugfs show: per-client summary of bytes held, aggregated by heap type.
 * Walks the client's handles under client->lock, then prints one row per
 * heap type that has allocations, plus the client's current refcount.
 */
581 static int ion_debug_client_show(struct seq_file *s, void *unused)
583 struct ion_client *client = s->private;
585 size_t sizes[ION_NUM_HEAPS] = {0};
586 const char *names[ION_NUM_HEAPS] = {0};
589 mutex_lock(&client->lock);
590 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
591 struct ion_handle *handle = rb_entry(n, struct ion_handle,
593 enum ion_heap_type type = handle->buffer->heap->type;
596 names[type] = handle->buffer->heap->name;
597 sizes[type] += handle->buffer->size;
599 mutex_unlock(&client->lock);
601 seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
602 for (i = 0; i < ION_NUM_HEAPS; i++) {
/* NOTE(review): %16u formats a size_t — should be %zu to be portable */
605 seq_printf(s, "%16.16s: %16u %d\n", names[i], sizes[i],
606 atomic_read(&client->ref.refcount));
/* debugfs open: bind the per-client show routine via single_open(). */
611 static int ion_debug_client_open(struct inode *inode, struct file *file)
613 return single_open(file, ion_debug_client_show, inode->i_private);
/* fops for the per-client debugfs file (seq_file single-show pattern). */
616 static const struct file_operations debug_client_fops = {
617 .open = ion_debug_client_open,
620 .release = single_release,
/*
 * Find the userspace client belonging to @task in dev->user_clients
 * (ordered by task pointer).  On a hit, a reference is taken before
 * dev->lock is dropped so the client cannot disappear under the caller.
 */
623 static struct ion_client *ion_client_lookup(struct ion_device *dev,
624 struct task_struct *task)
626 struct rb_node *n = dev->user_clients.rb_node;
627 struct ion_client *client;
629 mutex_lock(&dev->lock);
631 client = rb_entry(n, struct ion_client, node);
632 if (task == client->task) {
633 ion_client_get(client);
634 mutex_unlock(&dev->lock);
636 } else if (task < client->task) {
638 } else if (task > client->task) {
642 mutex_unlock(&dev->lock);
/*
 * ion_client_create - create (or reuse) a client on @dev.
 * For normal processes the client is keyed by the group leader's
 * task_struct: if one already exists it is returned with an extra
 * reference.  Kernel threads (PF_KTHREAD) get no task backpointer and are
 * tracked in the separate kernel_clients tree, keyed by client pointer.
 * A per-client debugfs file named after the pid is created.
 */
646 struct ion_client *ion_client_create(struct ion_device *dev,
647 unsigned int heap_mask,
650 struct ion_client *client;
651 struct task_struct *task;
653 struct rb_node *parent = NULL;
654 struct ion_client *entry;
/* pin the group leader while we inspect and possibly store it */
658 get_task_struct(current->group_leader);
659 task_lock(current->group_leader);
660 pid = task_pid_nr(current->group_leader);
661 /* don't bother to store task struct for kernel threads,
662 they can't be killed anyway */
663 if (current->group_leader->flags & PF_KTHREAD) {
664 put_task_struct(current->group_leader);
667 task = current->group_leader;
669 task_unlock(current->group_leader);
671 /* if this isn't a kernel thread, see if a client already
674 client = ion_client_lookup(dev, task)
675 if (!IS_ERR_OR_NULL(client)) {
676 put_task_struct(current->group_leader);
681 client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
683 put_task_struct(current->group_leader);
684 return ERR_PTR(-ENOMEM);
688 client->handles = RB_ROOT;
689 mutex_init(&client->lock);
691 client->heap_mask = heap_mask;
694 kref_init(&client->ref);
696 mutex_lock(&dev->lock);
/* userspace client: insert into user_clients ordered by task pointer */
698 p = &dev->user_clients.rb_node;
701 entry = rb_entry(parent, struct ion_client, node);
703 if (task < entry->task)
705 else if (task > entry->task)
708 rb_link_node(&client->node, parent, p);
709 rb_insert_color(&client->node, &dev->user_clients);
/* kernel client: insert into kernel_clients ordered by client pointer */
711 p = &dev->kernel_clients.rb_node;
714 entry = rb_entry(parent, struct ion_client, node);
718 else if (client > entry)
721 rb_link_node(&client->node, parent, p);
722 rb_insert_color(&client->node, &dev->kernel_clients);
725 snprintf(debug_name, 64, "%u", client->pid);
726 client->debug_root = debugfs_create_file(debug_name, 0664,
727 dev->debug_root, client,
729 mutex_unlock(&dev->lock);
/*
 * kref release callback for ion_client: destroy every remaining handle,
 * unlink the client from the appropriate device tree (user_clients vs
 * kernel_clients — the branch condition is elided in this excerpt), drop
 * the task reference for userspace clients, and remove the debugfs entry.
 */
734 static void _ion_client_destroy(struct kref *kref)
736 struct ion_client *client = container_of(kref, struct ion_client, ref);
737 struct ion_device *dev = client->dev;
740 pr_debug("%s: %d\n", __func__, __LINE__);
741 while ((n = rb_first(&client->handles))) {
742 struct ion_handle *handle = rb_entry(n, struct ion_handle,
744 ion_handle_destroy(&handle->ref);
746 mutex_lock(&dev->lock);
748 rb_erase(&client->node, &dev->user_clients);
749 put_task_struct(client->task);
751 rb_erase(&client->node, &dev->kernel_clients);
753 debugfs_remove_recursive(client->debug_root);
754 mutex_unlock(&dev->lock);
/* Take an additional reference on @client. */
759 static void ion_client_get(struct ion_client *client)
761 kref_get(&client->ref);
/* Drop a reference on @client; returns nonzero if this was the last one. */
764 static int ion_client_put(struct ion_client *client)
766 return kref_put(&client->ref, _ion_client_destroy);
/* Public teardown entry point: just drops the caller's client reference. */
769 void ion_client_destroy(struct ion_client *client)
771 ion_client_put(client);
/*
 * release() for a shared-ion file: drop the buffer reference taken when
 * the share fd was created (see ion_ioctl_share).
 */
774 static int ion_share_release(struct inode *inode, struct file* file)
776 struct ion_buffer *buffer = file->private_data;
778 pr_debug("%s: %d\n", __func__, __LINE__);
779 /* drop the reference to the buffer -- this prevents the
780 buffer from going away because the client holding it exited
781 while it was being passed */
782 ion_buffer_put(buffer);
/*
 * vm_ops.open: called when a mapped VMA is duplicated (fork / split).
 * Re-looks-up the owning client for the current process and pins it; if the
 * client is gone, vm_private_data is cleared so ion_vma_close knows there
 * is nothing to release.  NOTE(review): reference-taking lines for handle
 * appear elided in this excerpt — confirm against the full source.
 */
786 static void ion_vma_open(struct vm_area_struct *vma)
789 struct ion_buffer *buffer = vma->vm_file->private_data;
790 struct ion_handle *handle = vma->vm_private_data;
791 struct ion_client *client;
793 pr_debug("%s: %d\n", __func__, __LINE__);
794 /* check that the client still exists and take a reference so
795 it can't go away until this vma is closed */
796 client = ion_client_lookup(buffer->dev, current->group_leader);
797 if (IS_ERR_OR_NULL(client)) {
798 vma->vm_private_data = NULL;
801 pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
803 atomic_read(&client->ref.refcount),
804 atomic_read(&handle->ref.refcount),
805 atomic_read(&buffer->ref.refcount));
/*
 * vm_ops.close: release the handle and client references pinned for this
 * VMA.  A NULL vm_private_data (cleared by ion_vma_open when the client had
 * already exited) means there is nothing to drop — that early-out check is
 * elided in this excerpt.
 */
808 static void ion_vma_close(struct vm_area_struct *vma)
810 struct ion_handle *handle = vma->vm_private_data;
811 struct ion_buffer *buffer = vma->vm_file->private_data;
812 struct ion_client *client;
814 pr_debug("%s: %d\n", __func__, __LINE__);
815 /* this indicates the client is gone, nothing to do here */
818 client = handle->client;
819 pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
821 atomic_read(&client->ref.refcount),
822 atomic_read(&handle->ref.refcount),
823 atomic_read(&buffer->ref.refcount));
824 ion_handle_put(handle);
825 ion_client_put(client);
826 pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
828 atomic_read(&client->ref.refcount),
829 atomic_read(&handle->ref.refcount),
830 atomic_read(&buffer->ref.refcount));
/* VMA callbacks for shared-ion mappings: track per-VMA refs on open/close. */
833 static struct vm_operations_struct ion_vm_ops = {
834 .open = ion_vma_open,
835 .close = ion_vma_close,
/*
 * mmap() on a shared-ion fd: map the underlying buffer into userspace.
 * Pins the caller's client and imports a handle for the buffer (so both
 * outlive the mapping), bounds-checks the requested range against the
 * buffer size, delegates the actual mapping to the heap's map_user(), and
 * stashes the handle in vm_private_data for ion_vm_ops.  The error-label
 * cleanup paths at the bottom drop the pinned references.
 */
838 static int ion_share_mmap(struct file *file, struct vm_area_struct *vma)
840 struct ion_buffer *buffer = file->private_data;
841 unsigned long size = vma->vm_end - vma->vm_start;
842 struct ion_client *client;
843 struct ion_handle *handle;
846 pr_debug("%s: %d\n", __func__, __LINE__);
847 /* make sure the client still exists, it's possible for the client to
848 have gone away but the map/share fd still to be around, take
849 a reference to it so it can't go away while this mapping exists */
850 client = ion_client_lookup(buffer->dev, current->group_leader);
851 if (IS_ERR_OR_NULL(client)) {
852 pr_err("%s: trying to mmap an ion handle in a process with no "
853 "ion client\n", __func__);
/* reject mappings that extend past the end of the buffer */
857 if ((size > buffer->size) || (size + (vma->vm_pgoff << PAGE_SHIFT) >
859 pr_err("%s: trying to map larger area than handle has available"
865 /* find the handle and take a reference to it */
866 handle = ion_import(client, buffer);
867 if (IS_ERR_OR_NULL(handle)) {
872 if (!handle->buffer->heap->ops->map_user) {
873 pr_err("%s: this heap does not define a method for mapping "
874 "to userspace\n", __func__);
879 mutex_lock(&buffer->lock);
880 /* now map it to userspace */
881 ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
882 mutex_unlock(&buffer->lock);
884 pr_err("%s: failure mapping buffer to userspace\n",
889 vma->vm_ops = &ion_vm_ops;
890 /* move the handle into the vm_private_data so we can access it from
892 vma->vm_private_data = handle;
893 pr_debug("%s: %d client_cnt %d handle_cnt %d alloc_cnt %d\n",
895 atomic_read(&client->ref.refcount),
896 atomic_read(&handle->ref.refcount),
897 atomic_read(&buffer->ref.refcount));
901 /* drop the reference to the handle */
902 ion_handle_put(handle);
904 /* drop the reference to the client */
905 ion_client_put(client);
/* fops for shared-ion fds; also the identity check used by ion_import_fd. */
909 static const struct file_operations ion_share_fops = {
910 .owner = THIS_MODULE,
911 .release = ion_share_release,
912 .mmap = ion_share_mmap,
/*
 * Create a shareable fd for @handle's buffer: an anon inode backed by
 * ion_share_fops with the buffer as private_data.  Takes a buffer
 * reference that ion_share_release drops when the fd is closed.
 * Returns the new fd (error paths are elided in this excerpt).
 */
915 static int ion_ioctl_share(struct file *parent, struct ion_client *client,
916 struct ion_handle *handle)
918 int fd = get_unused_fd();
924 file = anon_inode_getfile("ion_share_fd", &ion_share_fops,
925 handle->buffer, O_RDWR);
926 if (IS_ERR_OR_NULL(file))
928 ion_buffer_get(handle->buffer);
929 fd_install(fd, file);
/*
 * Main ioctl dispatcher for /dev/ion.  Most case labels are elided in this
 * excerpt; the visible bodies correspond (in order) to: allocate a buffer,
 * free a handle, share a handle as an fd, import a shared fd, and forward
 * ION_IOC_CUSTOM to the device's custom_ioctl hook.  Every branch copies
 * its argument struct from/to user space and validates handles under
 * client->lock before using them.
 */
938 static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
940 struct ion_client *client = filp->private_data;
/* ION_IOC_ALLOC path: allocate and hand back a new handle */
945 struct ion_allocation_data data;
947 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
949 data.handle = ion_alloc(client, data.len, data.align,
951 if (copy_to_user((void __user *)arg, &data, sizeof(data)))
/* ION_IOC_FREE path: validate then drop the handle */
957 struct ion_handle_data data;
960 if (copy_from_user(&data, (void __user *)arg,
961 sizeof(struct ion_handle_data)))
963 mutex_lock(&client->lock);
964 valid = ion_handle_validate(client, data.handle);
965 mutex_unlock(&client->lock);
968 ion_free(client, data.handle);
/* share/map path: turn a handle into a shareable fd */
974 struct ion_fd_data data;
976 if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
978 mutex_lock(&client->lock);
979 if (!ion_handle_validate(client, data.handle)) {
980 pr_err("%s: invalid handle passed to share ioctl.\n",
982 mutex_unlock(&client->lock);
985 data.fd = ion_ioctl_share(filp, client, data.handle);
986 mutex_unlock(&client->lock);
987 if (copy_to_user((void __user *)arg, &data, sizeof(data)))
/* ION_IOC_IMPORT path: turn a shared fd back into a handle */
993 struct ion_fd_data data;
994 if (copy_from_user(&data, (void __user *)arg,
995 sizeof(struct ion_fd_data)))
998 data.handle = ion_import_fd(client, data.fd);
999 if (IS_ERR(data.handle))
1001 if (copy_to_user((void __user *)arg, &data,
1002 sizeof(struct ion_fd_data)))
1006 case ION_IOC_CUSTOM:
1008 struct ion_device *dev = client->dev;
1009 struct ion_custom_data data;
1011 if (!dev->custom_ioctl)
1013 if (copy_from_user(&data, (void __user *)arg,
1014 sizeof(struct ion_custom_data)))
1016 return dev->custom_ioctl(client, data.cmd, data.arg);
/* release() for /dev/ion: drop the client created in ion_open. */
1024 static int ion_release(struct inode *inode, struct file *file)
1026 struct ion_client *client = file->private_data;
1028 pr_debug("%s: %d\n", __func__, __LINE__);
1029 ion_client_put(client);
/*
 * open() for /dev/ion: create a client for the calling process with all
 * heap types enabled (heap_mask -1) and stash it in file->private_data.
 */
1033 static int ion_open(struct inode *inode, struct file *file)
1035 struct miscdevice *miscdev = file->private_data;
1036 struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
1037 struct ion_client *client;
1039 pr_debug("%s: %d\n", __func__, __LINE__);
1040 client = ion_client_create(dev, -1, "user");
1041 if (IS_ERR_OR_NULL(client))
1042 return PTR_ERR(client);
1043 file->private_data = client;
/* fops for the /dev/ion misc device node. */
1048 static const struct file_operations ion_fops = {
1049 .owner = THIS_MODULE,
1051 .release = ion_release,
1052 .unlocked_ioctl = ion_ioctl,
/*
 * Sum the bytes @client holds in buffers from heaps of @type.
 * Walks the client's handle tree under client->lock.
 */
1055 static size_t ion_debug_heap_total(struct ion_client *client,
1056 enum ion_heap_type type)
1061 mutex_lock(&client->lock);
1062 for (n = rb_first(&client->handles); n; n = rb_next(n)) {
1063 struct ion_handle *handle = rb_entry(n,
1066 if (handle->buffer->heap->type == type)
1067 size += handle->buffer->size;
1069 mutex_unlock(&client->lock);
/*
 * debugfs show for a heap: print per-client usage of this heap's type,
 * first for userspace clients (by task comm + pid), then kernel clients
 * (by name).  NOTE(review): %16u formats a size_t — should be %zu.
 * NOTE(review): iterates client trees without taking dev->lock — confirm.
 */
1073 static int ion_debug_heap_show(struct seq_file *s, void *unused)
1075 struct ion_heap *heap = s->private;
1076 struct ion_device *dev = heap->dev;
1079 seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
1080 for (n = rb_first(&dev->user_clients); n; n = rb_next(n)) {
1081 struct ion_client *client = rb_entry(n, struct ion_client,
1083 char task_comm[TASK_COMM_LEN];
1084 size_t size = ion_debug_heap_total(client, heap->type);
1088 get_task_comm(task_comm, client->task);
1089 seq_printf(s, "%16.s %16u %16u\n", task_comm, client->pid,
1093 for (n = rb_first(&dev->kernel_clients); n; n = rb_next(n)) {
1094 struct ion_client *client = rb_entry(n, struct ion_client,
1096 size_t size = ion_debug_heap_total(client, heap->type);
1099 seq_printf(s, "%16.s %16u %16u\n", client->name, client->pid,
/* debugfs open: bind the per-heap show routine via single_open(). */
1105 static int ion_debug_heap_open(struct inode *inode, struct file *file)
1107 return single_open(file, ion_debug_heap_show, inode->i_private);
/* fops for the per-heap debugfs file (seq_file single-show pattern). */
1110 static const struct file_operations debug_heap_fops = {
1111 .open = ion_debug_heap_open,
1113 .llseek = seq_lseek,
1114 .release = single_release,
/*
 * Register @heap with @dev: insert into dev->heaps (rbtree ordered by
 * heap->id, duplicates rejected with an error) and create the heap's
 * debugfs file.  Runs under dev->lock.
 */
1117 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
1119 struct rb_node **p = &dev->heaps.rb_node;
1120 struct rb_node *parent = NULL;
1121 struct ion_heap *entry;
1124 mutex_lock(&dev->lock);
1127 entry = rb_entry(parent, struct ion_heap, node);
1129 if (heap->id < entry->id) {
1131 } else if (heap->id > entry->id ) {
1132 p = &(*p)->rb_right;
1134 pr_err("%s: can not insert multiple heaps with "
1135 "id %d\n", __func__, heap->id);
1140 rb_link_node(&heap->node, parent, p);
1141 rb_insert_color(&heap->node, &dev->heaps);
1142 debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
1145 mutex_unlock(&dev->lock);
/*
 * ion_device_create - allocate and register the ION device.
 * Registers the "ion" misc device, creates the debugfs root (failure there
 * is only logged), stores the optional custom_ioctl hook, and initialises
 * the buffer/heap/client trees and the device lock.
 * NOTE(review): on misc_register failure the ERR_PTR return leaks idev —
 * a kfree appears to be missing (or elided) before returning.
 */
1148 struct ion_device *ion_device_create(long (*custom_ioctl)
1149 (struct ion_client *client,
1153 struct ion_device *idev;
1156 idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
1158 return ERR_PTR(-ENOMEM);
1160 idev->dev.minor = MISC_DYNAMIC_MINOR;
1161 idev->dev.name = "ion";
1162 idev->dev.fops = &ion_fops;
1163 idev->dev.parent = NULL;
1164 ret = misc_register(&idev->dev);
1166 pr_err("ion: failed to register misc device.\n");
1167 return ERR_PTR(ret);
1170 idev->debug_root = debugfs_create_dir("ion", NULL);
1171 if (IS_ERR_OR_NULL(idev->debug_root))
1172 pr_err("ion: failed to create debug files.\n");
1174 idev->custom_ioctl = custom_ioctl;
1175 idev->buffers = RB_ROOT;
1176 mutex_init(&idev->lock);
1177 idev->heaps = RB_ROOT;
1178 idev->user_clients = RB_ROOT;
1179 idev->kernel_clients = RB_ROOT;
/* Unregister the misc device; heaps/clients are not freed (see XXX). */
1183 void ion_device_destroy(struct ion_device *dev)
1185 misc_deregister(&dev->dev);
1186 /* XXX need to free the heaps and clients ? */
1190 void __init ion_reserve(struct ion_platform_data *data)
1194 for (i = 0; i < data->nr; i++) {
1195 if (data->heaps[i].size == 0)
1197 ret = memblock_reserve(data->heaps[i].base,
1198 data->heaps[i].size);
1200 pr_err("memblock reserve of %x@%lx failed\n",
1201 data->heaps[i].size,
1202 data->heaps[i].base);